From 6e3168684426462429749475f93bb9d6682f2c93 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 15 May 2025 15:39:33 +0000
Subject: [PATCH 1/2] Release 0.8.39

---
 poetry.lock | 282 +-
 pyproject.toml | 3 +
 src/humanloop/__init__.py | 872 ----
 src/humanloop/agents/__init__.py | 51 -
 src/humanloop/agents/client.py | 2946 ------
 src/humanloop/agents/raw_client.py | 4021 -----
 src/humanloop/agents/requests/__init__.py | 27 -
 .../requests/agent_log_request_agent.py | 7 -
 .../requests/agent_log_request_tool_choice.py | 9 -
 .../agent_request_reasoning_effort.py | 7 -
 .../agents/requests/agent_request_stop.py | 5 -
 .../agents/requests/agent_request_template.py | 7 -
 .../requests/agent_request_tools_item.py | 8 -
 .../requests/agents_call_request_agent.py | 7 -
 .../agents_call_request_tool_choice.py | 9 -
 .../agents_call_stream_request_agent.py | 7 -
 .../agents_call_stream_request_tool_choice.py | 9 -
 src/humanloop/agents/types/__init__.py | 27 -
 .../agents/types/agent_log_request_agent.py | 7 -
 .../types/agent_log_request_tool_choice.py | 9 -
 .../types/agent_request_reasoning_effort.py | 7 -
 .../agents/types/agent_request_stop.py | 5 -
 .../agents/types/agent_request_template.py | 7 -
 .../agents/types/agent_request_tools_item.py | 8 -
 .../agents/types/agents_call_request_agent.py | 7 -
 .../types/agents_call_request_tool_choice.py | 9 -
 .../types/agents_call_stream_request_agent.py | 7 -
 .../agents_call_stream_request_tool_choice.py | 9 -
 src/humanloop/base_client.py | 170 -
 src/humanloop/core/__init__.py | 59 -
 src/humanloop/core/api_error.py | 23 -
 src/humanloop/core/client_wrapper.py | 55 -
 src/humanloop/core/datetime_utils.py | 28 -
 src/humanloop/core/file.py | 67 -
 src/humanloop/core/http_client.py | 497 --
 src/humanloop/core/http_response.py | 55 -
 src/humanloop/core/jsonable_encoder.py | 100 -
 src/humanloop/core/pagination.py | 82 -
 src/humanloop/core/pydantic_utilities.py | 255 --
 src/humanloop/core/query_encoder.py | 58 -
 src/humanloop/core/remove_none_from_dict.py | 11 -
 src/humanloop/core/request_options.py | 35 -
 src/humanloop/core/serialization.py | 276 --
 src/humanloop/core/unchecked_base_model.py | 303 --
 src/humanloop/datasets/__init__.py | 7 -
 src/humanloop/datasets/client.py | 1330 ------
 src/humanloop/datasets/raw_client.py | 1924 --------
 src/humanloop/datasets/types/__init__.py | 9 -
 ...versions_get_request_include_datapoints.py | 7 -
 src/humanloop/directories/__init__.py | 4 -
 src/humanloop/directories/client.py | 385 --
 src/humanloop/directories/raw_client.py | 596 ---
 src/humanloop/environment.py | 7 -
 src/humanloop/errors/__init__.py | 7 -
 .../errors/unprocessable_entity_error.py | 11 -
 src/humanloop/evaluations/__init__.py | 27 -
 src/humanloop/evaluations/client.py | 1177 -----
 src/humanloop/evaluations/raw_client.py | 1845 --------
 .../evaluations/requests/__init__.py | 15 -
 .../add_evaluators_request_evaluators_item.py | 11 -
 ...eate_evaluation_request_evaluators_item.py | 11 -
 .../requests/create_run_request_dataset.py | 9 -
 .../requests/create_run_request_version.py | 9 -
 src/humanloop/evaluations/types/__init__.py | 15 -
 .../add_evaluators_request_evaluators_item.py | 9 -
 ...eate_evaluation_request_evaluators_item.py | 9 -
 .../types/create_run_request_dataset.py | 9 -
 .../types/create_run_request_version.py | 9 -
 src/humanloop/evaluators/__init__.py | 19 -
 src/humanloop/evaluators/client.py | 1411 ------
 src/humanloop/evaluators/raw_client.py | 2014 ---------
 src/humanloop/evaluators/requests/__init__.py | 13 -
 .../create_evaluator_log_request_judgment.py | 5 -
 .../create_evaluator_log_request_spec.py | 12 -
 .../requests/evaluator_request_spec.py | 12 -
 src/humanloop/evaluators/types/__init__.py | 9 -
 .../create_evaluator_log_request_judgment.py | 5 -
 .../create_evaluator_log_request_spec.py | 12 -
 .../types/evaluator_request_spec.py | 12 -
 src/humanloop/files/__init__.py | 8 -
 src/humanloop/files/client.py | 301 --
 src/humanloop/files/raw_client.py | 382 --
 src/humanloop/files/requests/__init__.py | 7 -
 ...th_files_retrieve_by_path_post_response.py | 19 -
 src/humanloop/files/types/__init__.py | 7 -
 ...th_files_retrieve_by_path_post_response.py | 14 -
 src/humanloop/flows/__init__.py | 4 -
 src/humanloop/flows/client.py | 1583 -------
 src/humanloop/flows/raw_client.py | 2217 ---------
 src/humanloop/logs/__init__.py | 4 -
 src/humanloop/logs/client.py | 360 --
 src/humanloop/logs/raw_client.py | 501 --
 src/humanloop/prompts/__init__.py | 51 -
 src/humanloop/prompts/client.py | 2990 ------
 src/humanloop/prompts/raw_client.py | 3977 ----
 src/humanloop/prompts/requests/__init__.py | 27 -
 .../requests/prompt_log_request_prompt.py | 7 -
 .../prompt_log_request_tool_choice.py | 9 -
 .../prompt_log_update_request_tool_choice.py | 9 -
 .../prompt_request_reasoning_effort.py | 7 -
 .../prompts/requests/prompt_request_stop.py | 5 -
 .../requests/prompt_request_template.py | 7 -
 .../requests/prompts_call_request_prompt.py | 7 -
 .../prompts_call_request_tool_choice.py | 9 -
 .../prompts_call_stream_request_prompt.py | 7 -
 ...prompts_call_stream_request_tool_choice.py | 9 -
 src/humanloop/prompts/types/__init__.py | 27 -
 .../types/prompt_log_request_prompt.py | 7 -
 .../types/prompt_log_request_tool_choice.py | 9 -
 .../prompt_log_update_request_tool_choice.py | 9 -
 .../types/prompt_request_reasoning_effort.py | 7 -
 .../prompts/types/prompt_request_stop.py | 5 -
 .../prompts/types/prompt_request_template.py | 7 -
 .../types/prompts_call_request_prompt.py | 7 -
 .../types/prompts_call_request_tool_choice.py | 9 -
 .../prompts_call_stream_request_prompt.py | 7 -
 ...prompts_call_stream_request_tool_choice.py | 9 -
 src/humanloop/py.typed | 0
 src/humanloop/requests/__init__.py | 339 --
 src/humanloop/requests/agent_call_response.py | 202 -
 .../agent_call_response_tool_choice.py | 9 -
 .../requests/agent_call_stream_response.py | 19 -
 .../agent_call_stream_response_payload.py | 9 -
 .../requests/agent_config_response.py | 7 -
 .../requests/agent_continue_call_response.py | 202 -
 ...gent_continue_call_response_tool_choice.py | 9 -
 .../agent_continue_call_stream_response.py | 19 -
 ...t_continue_call_stream_response_payload.py | 9 -
 src/humanloop/requests/agent_inline_tool.py | 13 -
 .../requests/agent_kernel_request.py | 112 -
 .../agent_kernel_request_reasoning_effort.py | 7 -
 .../requests/agent_kernel_request_stop.py | 5 -
 .../requests/agent_kernel_request_template.py | 7 -
 .../agent_kernel_request_tools_item.py | 8 -
 .../requests/agent_linked_file_request.py | 13 -
 .../requests/agent_linked_file_response.py | 19 -
 .../agent_linked_file_response_file.py | 22 -
 src/humanloop/requests/agent_log_response.py | 201 -
 .../agent_log_response_tool_choice.py | 9 -
 .../requests/agent_log_stream_response.py | 87 -
 src/humanloop/requests/agent_response.py | 242 -
 .../agent_response_reasoning_effort.py | 7 -
 src/humanloop/requests/agent_response_stop.py | 5 -
 .../requests/agent_response_template.py | 7 -
 .../requests/agent_response_tools_item.py | 11 -
 .../anthropic_redacted_thinking_content.py | 13 -
 .../requests/anthropic_thinking_content.py | 18 -
 .../boolean_evaluator_stats_response.py | 45 -
 src/humanloop/requests/chat_message.py | 41 -
 .../requests/chat_message_content.py | 7 -
 .../requests/chat_message_content_item.py | 8 -
 .../requests/chat_message_thinking_item.py | 8 -
 .../requests/code_evaluator_request.py | 48 -
 .../requests/create_agent_log_response.py | 30 -
 .../requests/create_datapoint_request.py | 24 -
 .../create_datapoint_request_target_value.py | 7 -
 .../requests/create_evaluator_log_response.py | 25 -
 .../requests/create_flow_log_response.py | 30 -
 .../requests/create_prompt_log_response.py | 25 -
 .../requests/create_tool_log_response.py | 25 -
 .../requests/dashboard_configuration.py | 12 -
 src/humanloop/requests/datapoint_response.py | 29 -
 .../datapoint_response_target_value.py | 7 -
 src/humanloop/requests/dataset_response.py | 102 -
 src/humanloop/requests/directory_response.py | 46 -
 ...tory_with_parents_and_children_response.py | 64 -
 ...arents_and_children_response_files_item.py | 19 -
 .../requests/environment_response.py | 13 -
 src/humanloop/requests/evaluatee_request.py | 45 -
 src/humanloop/requests/evaluatee_response.py | 33 -
 .../requests/evaluation_evaluator_response.py | 19 -
 .../requests/evaluation_log_response.py | 29 -
 src/humanloop/requests/evaluation_response.py | 43 -
 .../requests/evaluation_run_response.py | 56 -
 .../requests/evaluation_runs_response.py | 13 -
 src/humanloop/requests/evaluation_stats.py | 29 -
 ...aluator_activation_deactivation_request.py | 25 -
 ...tion_deactivation_request_activate_item.py | 10 -
 ...on_deactivation_request_deactivate_item.py | 10 -
 src/humanloop/requests/evaluator_aggregate.py | 25 -
 .../requests/evaluator_config_response.py | 7 -
 src/humanloop/requests/evaluator_file_id.py | 24 -
 src/humanloop/requests/evaluator_file_path.py | 24 -
 .../evaluator_judgment_number_limit.py | 20 -
 .../evaluator_judgment_option_response.py | 16 -
 .../requests/evaluator_log_response.py | 176 -
 .../evaluator_log_response_judgment.py | 5 -
 src/humanloop/requests/evaluator_response.py | 122 -
 .../requests/evaluator_response_spec.py | 12 -
 .../requests/evaluator_version_id.py | 19 -
 .../requests/external_evaluator_request.py | 44 -
 .../requests/file_environment_response.py | 24 -
 .../file_environment_response_file.py | 19 -
 .../file_environment_variable_request.py | 15 -
 src/humanloop/requests/file_id.py | 19 -
 src/humanloop/requests/file_path.py | 19 -
 src/humanloop/requests/file_request.py | 15 -
 src/humanloop/requests/flow_kernel_request.py | 12 -
 src/humanloop/requests/flow_log_response.py | 161 -
 src/humanloop/requests/flow_response.py | 109 -
 src/humanloop/requests/function_tool.py | 12 -
 .../requests/function_tool_choice.py | 11 -
 .../requests/http_validation_error.py | 10 -
 .../requests/human_evaluator_request.py | 48 -
 src/humanloop/requests/image_chat_content.py | 14 -
 src/humanloop/requests/image_url.py | 16 -
 src/humanloop/requests/input_response.py | 10 -
 src/humanloop/requests/linked_file_request.py | 9 -
 .../requests/linked_tool_response.py | 37 -
 src/humanloop/requests/list_agents.py | 13 -
 src/humanloop/requests/list_datasets.py | 13 -
 src/humanloop/requests/list_evaluators.py | 13 -
 src/humanloop/requests/list_flows.py | 13 -
 src/humanloop/requests/list_prompts.py | 13 -
 src/humanloop/requests/list_tools.py | 13 -
 .../requests/llm_evaluator_request.py | 49 -
 src/humanloop/requests/log_response.py | 19 -
 src/humanloop/requests/log_stream_response.py | 8 -
 ...onitoring_evaluator_environment_request.py | 15 -
 .../requests/monitoring_evaluator_response.py | 33 -
 .../monitoring_evaluator_version_request.py | 10 -
 .../numeric_evaluator_stats_response.py | 42 -
 src/humanloop/requests/overall_stats.py | 30 -
 .../requests/paginated_data_agent_response.py | 13 -
 .../paginated_data_evaluation_log_response.py | 13 -
 .../paginated_data_evaluator_response.py | 13 -
 .../requests/paginated_data_flow_response.py | 13 -
 .../requests/paginated_data_log_response.py | 13 -
 .../paginated_data_prompt_response.py | 13 -
 .../requests/paginated_data_tool_response.py | 13 -
 ...r_response_flow_response_agent_response.py | 19 -
 ...ow_response_agent_response_records_item.py | 19 -
 .../requests/paginated_datapoint_response.py | 13 -
 .../requests/paginated_dataset_response.py | 13 -
 .../requests/paginated_evaluation_response.py | 13 -
 .../requests/populate_template_response.py | 229 -
 ...te_template_response_populated_template.py | 7 -
 ...late_template_response_reasoning_effort.py | 7 -
 .../populate_template_response_stop.py | 5 -
 .../populate_template_response_template.py | 7 -
 .../requests/prompt_call_log_response.py | 77 -
 .../requests/prompt_call_response.py | 111 -
 .../prompt_call_response_tool_choice.py | 9 -
 .../requests/prompt_call_stream_response.py | 92 -
 .../requests/prompt_kernel_request.py | 116 -
 .../prompt_kernel_request_reasoning_effort.py | 7 -
 .../requests/prompt_kernel_request_stop.py | 5 -
 .../prompt_kernel_request_template.py | 7 -
 src/humanloop/requests/prompt_log_response.py | 201 -
 .../prompt_log_response_tool_choice.py | 9 -
 src/humanloop/requests/prompt_response.py | 227 -
 .../prompt_response_reasoning_effort.py | 7 -
 .../requests/prompt_response_stop.py | 5 -
 .../requests/prompt_response_template.py | 7 -
 src/humanloop/requests/provider_api_keys.py | 16 -
 src/humanloop/requests/response_format.py | 18 -
 src/humanloop/requests/run_stats_response.py | 43 -
 ...run_stats_response_evaluator_stats_item.py | 15 -
 .../requests/run_version_response.py | 13 -
 .../select_evaluator_stats_response.py | 41 -
 src/humanloop/requests/text_chat_content.py | 13 -
 .../requests/text_evaluator_stats_response.py | 35 -
 src/humanloop/requests/tool_call.py | 15 -
 src/humanloop/requests/tool_call_response.py | 146 -
 src/humanloop/requests/tool_choice.py | 14 -
 src/humanloop/requests/tool_function.py | 27 -
 src/humanloop/requests/tool_kernel_request.py | 28 -
 src/humanloop/requests/tool_log_response.py | 156 -
 src/humanloop/requests/tool_response.py | 145 -
 .../requests/update_version_request.py | 15 -
 src/humanloop/requests/validation_error.py | 12 -
 .../requests/validation_error_loc_item.py | 5 -
 .../requests/version_deployment_response.py | 29 -
 .../version_deployment_response_file.py | 22 -
 src/humanloop/requests/version_id.py | 10 -
 src/humanloop/requests/version_id_response.py | 23 -
 .../requests/version_id_response_version.py | 22 -
 .../requests/version_reference_response.py | 10 -
 .../requests/version_stats_response.py | 28 -
 ...s_response_evaluator_version_stats_item.py | 15 -
 src/humanloop/tools/__init__.py | 4 -
 src/humanloop/tools/client.py | 2101 ---------
 src/humanloop/tools/raw_client.py | 2917 ------
 src/humanloop/types/__init__.py | 411 --
 src/humanloop/types/agent_call_response.py | 231 -
 .../types/agent_call_response_tool_choice.py | 9 -
 .../types/agent_call_stream_response.py | 51 -
 .../agent_call_stream_response_payload.py | 9 -
 src/humanloop/types/agent_config_response.py | 18 -
 .../types/agent_continue_call_response.py | 231 -
 ...gent_continue_call_response_tool_choice.py | 9 -
 .../agent_continue_call_stream_response.py | 51 -
 ...t_continue_call_stream_response_payload.py | 9 -
 src/humanloop/types/agent_inline_tool.py | 24 -
 src/humanloop/types/agent_kernel_request.py | 123 -
 .../agent_kernel_request_reasoning_effort.py | 7 -
 .../types/agent_kernel_request_stop.py | 5 -
 .../types/agent_kernel_request_template.py | 7 -
 .../types/agent_kernel_request_tools_item.py | 8 -
 .../types/agent_linked_file_request.py | 24 -
 .../types/agent_linked_file_response.py | 40 -
 .../types/agent_linked_file_response_file.py | 17 -
 src/humanloop/types/agent_log_response.py | 225 -
 .../types/agent_log_response_tool_choice.py | 9 -
 .../types/agent_log_stream_response.py | 99 -
 src/humanloop/types/agent_response.py | 266 --
 .../types/agent_response_reasoning_effort.py | 7 -
 src/humanloop/types/agent_response_stop.py | 5 -
 .../types/agent_response_template.py | 7 -
 .../types/agent_response_tools_item.py | 11 -
 .../anthropic_redacted_thinking_content.py | 24 -
 .../types/anthropic_thinking_content.py | 29 -
 .../types/base_models_user_response.py | 5 -
 .../types/boolean_evaluator_stats_response.py | 58 -
 src/humanloop/types/chat_message.py | 52 -
 src/humanloop/types/chat_message_content.py | 7 -
 .../types/chat_message_content_item.py | 8 -
 .../types/chat_message_thinking_item.py | 8 -
 src/humanloop/types/chat_role.py | 5 -
 src/humanloop/types/chat_tool_type.py | 5 -
 src/humanloop/types/code_evaluator_request.py | 59 -
 src/humanloop/types/config_tool_response.py | 5 -
 .../types/create_agent_log_response.py | 43 -
 .../types/create_datapoint_request.py | 35 -
 .../create_datapoint_request_target_value.py | 7 -
 .../types/create_evaluator_log_response.py | 38 -
 .../types/create_flow_log_response.py | 43 -
 .../types/create_prompt_log_response.py | 38 -
 .../types/create_tool_log_response.py | 38 -
 .../types/dashboard_configuration.py | 23 -
 src/humanloop/types/datapoint_response.py | 40 -
 .../types/datapoint_response_target_value.py | 7 -
 src/humanloop/types/dataset_response.py | 117 -
 src/humanloop/types/datasets_request.py | 5 -
 src/humanloop/types/directory_response.py | 57 -
 ...tory_with_parents_and_children_response.py | 88 -
 ...arents_and_children_response_files_item.py | 14 -
 src/humanloop/types/environment_response.py | 25 -
 src/humanloop/types/environment_tag.py | 5 -
 src/humanloop/types/evaluatee_request.py | 58 -
 src/humanloop/types/evaluatee_response.py | 60 -
 .../types/evaluation_evaluator_response.py | 45 -
 .../types/evaluation_log_response.py | 60 -
 src/humanloop/types/evaluation_response.py | 69 -
 .../types/evaluation_run_response.py | 83 -
 .../types/evaluation_runs_response.py | 39 -
 src/humanloop/types/evaluation_stats.py | 40 -
 src/humanloop/types/evaluation_status.py | 5 -
 .../types/evaluations_dataset_request.py | 5 -
 src/humanloop/types/evaluations_request.py | 5 -
 ...aluator_activation_deactivation_request.py | 36 -
 ...tion_deactivation_request_activate_item.py | 10 -
 ...on_deactivation_request_deactivate_item.py | 10 -
 src/humanloop/types/evaluator_aggregate.py | 37 -
 .../types/evaluator_arguments_type.py | 5 -
 .../types/evaluator_config_response.py | 18 -
 src/humanloop/types/evaluator_file_id.py | 37 -
 src/humanloop/types/evaluator_file_path.py | 37 -
 .../types/evaluator_judgment_number_limit.py | 33 -
 .../evaluator_judgment_option_response.py | 29 -
 src/humanloop/types/evaluator_log_response.py | 201 -
 .../types/evaluator_log_response_judgment.py | 5 -
 src/humanloop/types/evaluator_response.py | 146 -
 .../types/evaluator_response_spec.py | 12 -
 .../types/evaluator_return_type_enum.py | 7 -
 src/humanloop/types/evaluator_version_id.py | 32 -
 src/humanloop/types/evaluators_request.py | 5 -
 src/humanloop/types/event_type.py | 21 -
 .../types/external_evaluator_request.py | 55 -
 src/humanloop/types/feedback_type.py | 5 -
 .../types/file_environment_response.py | 51 -
 .../types/file_environment_response_file.py | 14 -
 .../file_environment_variable_request.py | 28 -
 src/humanloop/types/file_id.py | 32 -
 src/humanloop/types/file_path.py | 32 -
 src/humanloop/types/file_request.py | 28 -
 src/humanloop/types/file_sort_by.py | 5 -
 src/humanloop/types/file_type.py | 5 -
 src/humanloop/types/files_tool_type.py | 7 -
 src/humanloop/types/flow_kernel_request.py | 23 -
 src/humanloop/types/flow_log_response.py | 185 -
 src/humanloop/types/flow_response.py | 133 -
 src/humanloop/types/function_tool.py | 25 -
 src/humanloop/types/function_tool_choice.py | 24 -
 src/humanloop/types/http_validation_error.py | 21 -
 .../types/human_evaluator_request.py | 59 -
 .../human_evaluator_request_return_type.py | 7 -
 src/humanloop/types/image_chat_content.py | 25 -
 src/humanloop/types/image_url.py | 29 -
 src/humanloop/types/image_url_detail.py | 5 -
 src/humanloop/types/input_response.py | 23 -
 src/humanloop/types/linked_file_request.py | 22 -
 src/humanloop/types/linked_tool_response.py | 48 -
 src/humanloop/types/list_agents.py | 38 -
 src/humanloop/types/list_datasets.py | 24 -
 src/humanloop/types/list_evaluators.py | 38 -
 src/humanloop/types/list_flows.py | 38 -
 src/humanloop/types/list_prompts.py | 38 -
 src/humanloop/types/list_tools.py | 38 -
 src/humanloop/types/llm_evaluator_request.py | 60 -
 src/humanloop/types/log_response.py | 15 -
 src/humanloop/types/log_status.py | 5 -
 src/humanloop/types/log_stream_response.py | 8 -
 src/humanloop/types/model_endpoints.py | 5 -
 src/humanloop/types/model_providers.py | 10 -
 ...onitoring_evaluator_environment_request.py | 28 -
 .../types/monitoring_evaluator_response.py | 53 -
 .../types/monitoring_evaluator_state.py | 5 -
 .../monitoring_evaluator_version_request.py | 23 -
 .../types/numeric_evaluator_stats_response.py | 53 -
 src/humanloop/types/observability_status.py | 5 -
 src/humanloop/types/on_agent_call_enum.py | 5 -
 .../types/open_ai_reasoning_effort.py | 5 -
 src/humanloop/types/overall_stats.py | 43 -
 .../types/paginated_data_agent_response.py | 38 -
 .../paginated_data_evaluation_log_response.py | 44 -
 .../paginated_data_evaluator_response.py | 38 -
 .../types/paginated_data_flow_response.py | 38 -
 .../types/paginated_data_log_response.py | 44 -
 .../types/paginated_data_prompt_response.py | 38 -
 .../types/paginated_data_tool_response.py | 38 -
 ...r_response_flow_response_agent_response.py | 47 -
 ...ow_response_agent_response_records_item.py | 14 -
 .../types/paginated_datapoint_response.py | 24 -
 .../types/paginated_dataset_response.py | 24 -
 .../types/paginated_evaluation_response.py | 39 -
 .../types/paginated_prompt_log_response.py | 5 -
 .../types/paginated_session_response.py | 5 -
 src/humanloop/types/platform_access_enum.py | 5 -
 .../types/populate_template_response.py | 258 --
 ...te_template_response_populated_template.py | 7 -
 ...late_template_response_reasoning_effort.py | 7 -
 .../types/populate_template_response_stop.py | 5 -
 .../populate_template_response_template.py | 7 -
 .../types/prompt_call_log_response.py | 89 -
 src/humanloop/types/prompt_call_response.py | 136 -
 .../types/prompt_call_response_tool_choice.py | 9 -
 .../types/prompt_call_stream_response.py | 104 -
 src/humanloop/types/prompt_kernel_request.py | 127 -
 .../prompt_kernel_request_reasoning_effort.py | 7 -
 .../types/prompt_kernel_request_stop.py | 5 -
 .../types/prompt_kernel_request_template.py | 7 -
 src/humanloop/types/prompt_log_response.py | 225 -
 .../types/prompt_log_response_tool_choice.py | 9 -
 src/humanloop/types/prompt_response.py | 251 -
 .../types/prompt_response_reasoning_effort.py | 7 -
 src/humanloop/types/prompt_response_stop.py | 5 -
 .../types/prompt_response_template.py | 7 -
 src/humanloop/types/provider_api_keys.py | 30 -
 src/humanloop/types/response_format.py | 29 -
 src/humanloop/types/response_format_type.py | 5 -
 src/humanloop/types/run_stats_response.py | 54 -
 ...run_stats_response_evaluator_stats_item.py | 15 -
 src/humanloop/types/run_version_response.py | 11 -
 .../types/select_evaluator_stats_response.py | 52 -
 src/humanloop/types/sort_order.py | 5 -
 src/humanloop/types/template_language.py | 5 -
 src/humanloop/types/text_chat_content.py | 24 -
 .../types/text_evaluator_stats_response.py | 48 -
 src/humanloop/types/time_unit.py | 5 -
 src/humanloop/types/tool_call.py | 28 -
 src/humanloop/types/tool_call_response.py | 175 -
 src/humanloop/types/tool_choice.py | 27 -
 src/humanloop/types/tool_function.py | 38 -
 src/humanloop/types/tool_kernel_request.py | 39 -
 src/humanloop/types/tool_log_response.py | 180 -
 src/humanloop/types/tool_response.py | 165 -
 src/humanloop/types/update_dateset_action.py | 5 -
 .../types/update_evaluation_status_request.py | 5 -
 src/humanloop/types/update_version_request.py | 28 -
 src/humanloop/types/user_response.py | 5 -
 src/humanloop/types/valence.py | 5 -
 src/humanloop/types/validation_error.py | 23 -
 .../types/validation_error_loc_item.py | 5 -
 .../types/version_deployment_response.py | 50 -
 .../types/version_deployment_response_file.py | 17 -
 src/humanloop/types/version_id.py | 23 -
 src/humanloop/types/version_id_response.py | 44 -
 .../types/version_id_response_version.py | 17 -
 .../types/version_reference_response.py | 10 -
 src/humanloop/types/version_stats_response.py | 39 -
 ...s_response_evaluator_version_stats_item.py | 15 -
 src/humanloop/types/version_status.py | 5 -
 src/humanloop/version.py | 3 -
 484 files changed, 144 insertions(+), 53368 deletions(-)
 delete mode 100644 src/humanloop/__init__.py
 delete mode 100644 src/humanloop/agents/__init__.py
 delete mode 100644 src/humanloop/agents/client.py
 delete mode 100644 src/humanloop/agents/raw_client.py
 delete mode 100644 src/humanloop/agents/requests/__init__.py
 delete mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
 delete mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py
 delete mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py
 delete mode 100644 src/humanloop/agents/requests/agent_request_stop.py
 delete mode 100644 src/humanloop/agents/requests/agent_request_template.py
 delete mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py
 delete mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
 delete mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py
 delete mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
 delete mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
 delete mode 100644 src/humanloop/agents/types/__init__.py
 delete mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
 delete mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py
 delete mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py
 delete mode 100644 src/humanloop/agents/types/agent_request_stop.py
 delete mode 100644 src/humanloop/agents/types/agent_request_template.py
 delete mode 100644 src/humanloop/agents/types/agent_request_tools_item.py
 delete mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
 delete mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py
 delete mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
 delete mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
 delete mode 100644 src/humanloop/base_client.py
 delete mode 100644 src/humanloop/core/__init__.py
 delete mode 100644 src/humanloop/core/api_error.py
 delete mode 100644 src/humanloop/core/client_wrapper.py
 delete mode 100644 src/humanloop/core/datetime_utils.py
 delete mode 100644 src/humanloop/core/file.py
 delete mode 100644 src/humanloop/core/http_client.py
 delete mode 100644 src/humanloop/core/http_response.py
 delete mode 100644 src/humanloop/core/jsonable_encoder.py
 delete mode 100644 src/humanloop/core/pagination.py
 delete mode 100644 src/humanloop/core/pydantic_utilities.py
 delete mode 100644 src/humanloop/core/query_encoder.py
 delete mode 100644 src/humanloop/core/remove_none_from_dict.py
 delete mode 100644 src/humanloop/core/request_options.py
 delete mode 100644 src/humanloop/core/serialization.py
 delete mode 100644 src/humanloop/core/unchecked_base_model.py
 delete mode 100644 src/humanloop/datasets/__init__.py
 delete mode 100644 src/humanloop/datasets/client.py
 delete mode 100644 src/humanloop/datasets/raw_client.py
 delete mode 100644 src/humanloop/datasets/types/__init__.py
 delete mode 100644 src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py
 delete mode 100644 src/humanloop/directories/__init__.py
 delete mode 100644 src/humanloop/directories/client.py
 delete mode 100644 src/humanloop/directories/raw_client.py
 delete mode 100644 src/humanloop/environment.py
 delete mode 100644 src/humanloop/errors/__init__.py
 delete mode 100644 src/humanloop/errors/unprocessable_entity_error.py
 delete mode 100644 src/humanloop/evaluations/__init__.py
 delete mode 100644 src/humanloop/evaluations/client.py
 delete mode 100644 src/humanloop/evaluations/raw_client.py
 delete mode 100644 src/humanloop/evaluations/requests/__init__.py
 delete mode 100644 src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py
 delete mode 100644 src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py
 delete mode 100644 src/humanloop/evaluations/requests/create_run_request_dataset.py
 delete mode 100644 src/humanloop/evaluations/requests/create_run_request_version.py
 delete mode 100644 src/humanloop/evaluations/types/__init__.py
 delete mode 100644 src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py
 delete mode 100644 src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py
 delete mode 100644 src/humanloop/evaluations/types/create_run_request_dataset.py
 delete mode 100644 src/humanloop/evaluations/types/create_run_request_version.py
 delete mode 100644 src/humanloop/evaluators/__init__.py
 delete mode 100644 src/humanloop/evaluators/client.py
 delete mode 100644 src/humanloop/evaluators/raw_client.py
 delete mode 100644 src/humanloop/evaluators/requests/__init__.py
 delete mode 100644 src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py
 delete mode 100644 src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py
 delete mode 100644 src/humanloop/evaluators/requests/evaluator_request_spec.py
 delete mode 100644 src/humanloop/evaluators/types/__init__.py
 delete mode 100644 src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py
 delete mode 100644 src/humanloop/evaluators/types/create_evaluator_log_request_spec.py
 delete mode 100644 src/humanloop/evaluators/types/evaluator_request_spec.py
 delete mode 100644 src/humanloop/files/__init__.py
 delete mode 100644 src/humanloop/files/client.py
 delete mode 100644 src/humanloop/files/raw_client.py
 delete mode 100644 src/humanloop/files/requests/__init__.py
 delete mode 100644 src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
 delete mode 100644 src/humanloop/files/types/__init__.py
 delete mode 100644 src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
 delete mode 100644 src/humanloop/flows/__init__.py
 delete mode 100644 src/humanloop/flows/client.py
 delete mode 100644 src/humanloop/flows/raw_client.py
 delete mode 100644 src/humanloop/logs/__init__.py
 delete mode 100644 src/humanloop/logs/client.py
 delete mode 100644 src/humanloop/logs/raw_client.py
 delete mode 100644 src/humanloop/prompts/__init__.py
 delete mode 100644 src/humanloop/prompts/client.py
 delete mode 100644 src/humanloop/prompts/raw_client.py
 delete mode 100644 src/humanloop/prompts/requests/__init__.py
 delete mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
 delete mode 100644 src/humanloop/prompts/requests/prompt_log_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
 delete mode 100644 src/humanloop/prompts/requests/prompt_request_stop.py
 delete mode 100644 src/humanloop/prompts/requests/prompt_request_template.py
 delete mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
 delete mode 100644 src/humanloop/prompts/requests/prompts_call_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
 delete mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/types/__init__.py
 delete mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
 delete mode 100644 src/humanloop/prompts/types/prompt_log_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py
 delete mode 100644 src/humanloop/prompts/types/prompt_request_stop.py
 delete mode 100644 src/humanloop/prompts/types/prompt_request_template.py
 delete mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
 delete mode 100644 src/humanloop/prompts/types/prompts_call_request_tool_choice.py
 delete mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
 delete mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py
 delete mode 100644 src/humanloop/py.typed
 delete mode 100644 src/humanloop/requests/__init__.py
 delete mode 100644 src/humanloop/requests/agent_call_response.py
 delete mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py
 delete mode 100644 src/humanloop/requests/agent_call_stream_response.py
 delete mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py
 delete mode 100644 src/humanloop/requests/agent_config_response.py
 delete mode 100644 src/humanloop/requests/agent_continue_call_response.py
 delete mode 100644 src/humanloop/requests/agent_continue_call_response_tool_choice.py
 delete mode 100644 src/humanloop/requests/agent_continue_call_stream_response.py
 delete mode 100644 src/humanloop/requests/agent_continue_call_stream_response_payload.py
 delete mode 100644 src/humanloop/requests/agent_inline_tool.py
 delete mode 100644 src/humanloop/requests/agent_kernel_request.py
 delete mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py
 delete mode 100644 src/humanloop/requests/agent_kernel_request_stop.py
 delete mode 100644 src/humanloop/requests/agent_kernel_request_template.py
 delete mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py
 delete mode 100644 src/humanloop/requests/agent_linked_file_request.py
 delete mode 100644 src/humanloop/requests/agent_linked_file_response.py
 delete mode 100644 src/humanloop/requests/agent_linked_file_response_file.py
 delete mode 100644 src/humanloop/requests/agent_log_response.py
 delete mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py
 delete mode 100644 src/humanloop/requests/agent_log_stream_response.py
 delete mode 100644 src/humanloop/requests/agent_response.py
 delete mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py
 delete mode 100644 src/humanloop/requests/agent_response_stop.py
 delete mode 100644 src/humanloop/requests/agent_response_template.py
 delete mode 100644 src/humanloop/requests/agent_response_tools_item.py
 delete mode 100644 src/humanloop/requests/anthropic_redacted_thinking_content.py
 delete mode 100644 src/humanloop/requests/anthropic_thinking_content.py
 delete mode 100644 src/humanloop/requests/boolean_evaluator_stats_response.py
 delete mode 100644 src/humanloop/requests/chat_message.py
 delete mode 100644 src/humanloop/requests/chat_message_content.py
 delete mode 100644 src/humanloop/requests/chat_message_content_item.py
 delete mode 100644 src/humanloop/requests/chat_message_thinking_item.py
 delete mode 100644 src/humanloop/requests/code_evaluator_request.py
 delete mode 100644 src/humanloop/requests/create_agent_log_response.py
 delete mode 100644 src/humanloop/requests/create_datapoint_request.py
 delete mode 100644 src/humanloop/requests/create_datapoint_request_target_value.py
 delete mode 100644 src/humanloop/requests/create_evaluator_log_response.py
 delete mode 100644 src/humanloop/requests/create_flow_log_response.py
 delete mode 100644 src/humanloop/requests/create_prompt_log_response.py
 delete mode 100644 src/humanloop/requests/create_tool_log_response.py
 delete mode 100644 src/humanloop/requests/dashboard_configuration.py
 delete mode 100644 src/humanloop/requests/datapoint_response.py
 delete mode 100644 src/humanloop/requests/datapoint_response_target_value.py
 delete mode 100644 src/humanloop/requests/dataset_response.py
 delete mode 100644 src/humanloop/requests/directory_response.py
 delete mode 100644 src/humanloop/requests/directory_with_parents_and_children_response.py
 delete mode 100644 src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
 delete mode 100644 src/humanloop/requests/environment_response.py
 delete mode 100644 src/humanloop/requests/evaluatee_request.py
 delete mode 100644 src/humanloop/requests/evaluatee_response.py
 delete mode 100644 src/humanloop/requests/evaluation_evaluator_response.py
 delete mode 100644 src/humanloop/requests/evaluation_log_response.py
 delete mode 100644 src/humanloop/requests/evaluation_response.py
 delete mode 100644 src/humanloop/requests/evaluation_run_response.py
 delete mode 100644 src/humanloop/requests/evaluation_runs_response.py
 delete mode 100644 src/humanloop/requests/evaluation_stats.py
 delete mode 100644 src/humanloop/requests/evaluator_activation_deactivation_request.py
 delete mode 100644 src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py
 delete mode 100644 src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py
 delete mode 100644 src/humanloop/requests/evaluator_aggregate.py
 delete mode 100644 src/humanloop/requests/evaluator_config_response.py
 delete mode 100644 src/humanloop/requests/evaluator_file_id.py
 delete mode 100644 src/humanloop/requests/evaluator_file_path.py
 delete mode 100644 src/humanloop/requests/evaluator_judgment_number_limit.py
 delete mode 100644 src/humanloop/requests/evaluator_judgment_option_response.py
 delete mode 100644 src/humanloop/requests/evaluator_log_response.py
 delete mode 100644 src/humanloop/requests/evaluator_log_response_judgment.py
 delete mode 100644 src/humanloop/requests/evaluator_response.py
 delete mode 100644 src/humanloop/requests/evaluator_response_spec.py
 delete mode 100644 src/humanloop/requests/evaluator_version_id.py
 delete mode 100644 src/humanloop/requests/external_evaluator_request.py
 delete mode 100644 src/humanloop/requests/file_environment_response.py
 delete mode 100644 src/humanloop/requests/file_environment_response_file.py
 delete mode 100644 src/humanloop/requests/file_environment_variable_request.py
 delete mode 100644 src/humanloop/requests/file_id.py
 delete mode 100644 src/humanloop/requests/file_path.py
 delete mode 100644 src/humanloop/requests/file_request.py
 delete mode 100644 src/humanloop/requests/flow_kernel_request.py
 delete mode 100644 src/humanloop/requests/flow_log_response.py
 delete mode 100644 src/humanloop/requests/flow_response.py
 delete mode 100644 src/humanloop/requests/function_tool.py
 delete mode 100644 src/humanloop/requests/function_tool_choice.py
 delete mode 100644 src/humanloop/requests/http_validation_error.py
 delete mode 100644 src/humanloop/requests/human_evaluator_request.py
 delete mode 100644 src/humanloop/requests/image_chat_content.py
 delete mode 100644 src/humanloop/requests/image_url.py
 delete mode 100644 src/humanloop/requests/input_response.py
 delete mode 100644 src/humanloop/requests/linked_file_request.py
 delete mode 100644 src/humanloop/requests/linked_tool_response.py
 delete mode 100644 src/humanloop/requests/list_agents.py
 delete mode 100644 src/humanloop/requests/list_datasets.py
 delete mode 100644 src/humanloop/requests/list_evaluators.py
 delete mode 100644 src/humanloop/requests/list_flows.py
 delete mode 100644 src/humanloop/requests/list_prompts.py
 delete mode 100644 src/humanloop/requests/list_tools.py
 delete mode 100644 src/humanloop/requests/llm_evaluator_request.py
 delete mode 100644 src/humanloop/requests/log_response.py
 delete mode 100644 src/humanloop/requests/log_stream_response.py
 delete mode 100644 src/humanloop/requests/monitoring_evaluator_environment_request.py
 delete mode 100644 src/humanloop/requests/monitoring_evaluator_response.py
 delete mode 100644 src/humanloop/requests/monitoring_evaluator_version_request.py
 delete mode 100644 src/humanloop/requests/numeric_evaluator_stats_response.py
 delete mode 100644 src/humanloop/requests/overall_stats.py
 delete mode 100644 src/humanloop/requests/paginated_data_agent_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_evaluation_log_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_evaluator_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_flow_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_log_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_prompt_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_tool_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
 delete mode 100644 src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
 delete mode 100644 src/humanloop/requests/paginated_datapoint_response.py
 delete mode 100644 src/humanloop/requests/paginated_dataset_response.py
 delete mode 100644 src/humanloop/requests/paginated_evaluation_response.py
 delete mode 100644 src/humanloop/requests/populate_template_response.py
 delete mode 100644 src/humanloop/requests/populate_template_response_populated_template.py
 delete mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py
 delete mode 100644 src/humanloop/requests/populate_template_response_stop.py
 delete mode 100644 src/humanloop/requests/populate_template_response_template.py
 delete mode 100644 src/humanloop/requests/prompt_call_log_response.py
 delete mode 100644 src/humanloop/requests/prompt_call_response.py
 delete mode 100644 src/humanloop/requests/prompt_call_response_tool_choice.py
 delete mode 100644 src/humanloop/requests/prompt_call_stream_response.py
 delete mode 100644 src/humanloop/requests/prompt_kernel_request.py
 delete mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
 delete mode 100644 src/humanloop/requests/prompt_kernel_request_stop.py
 delete mode 100644 src/humanloop/requests/prompt_kernel_request_template.py
 delete mode 100644 src/humanloop/requests/prompt_log_response.py
 delete mode 100644 src/humanloop/requests/prompt_log_response_tool_choice.py
 delete mode 100644 src/humanloop/requests/prompt_response.py
 delete mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py
 delete mode 100644 src/humanloop/requests/prompt_response_stop.py
 delete mode 100644 src/humanloop/requests/prompt_response_template.py
 delete mode 100644 src/humanloop/requests/provider_api_keys.py
 delete mode 100644 src/humanloop/requests/response_format.py
 delete mode 100644 src/humanloop/requests/run_stats_response.py
 delete mode 100644 src/humanloop/requests/run_stats_response_evaluator_stats_item.py
 delete mode 100644 src/humanloop/requests/run_version_response.py
 delete mode 100644 src/humanloop/requests/select_evaluator_stats_response.py
 delete mode 100644 src/humanloop/requests/text_chat_content.py
 delete mode 100644 src/humanloop/requests/text_evaluator_stats_response.py
 delete mode 100644 src/humanloop/requests/tool_call.py
 delete mode 100644 src/humanloop/requests/tool_call_response.py
 delete mode 100644 src/humanloop/requests/tool_choice.py
 delete mode 100644 src/humanloop/requests/tool_function.py
 delete mode 100644 src/humanloop/requests/tool_kernel_request.py
 delete mode 100644 src/humanloop/requests/tool_log_response.py
 delete mode 100644 src/humanloop/requests/tool_response.py
 delete mode 100644 src/humanloop/requests/update_version_request.py
 delete mode 100644 src/humanloop/requests/validation_error.py
 delete mode 100644 src/humanloop/requests/validation_error_loc_item.py
 delete mode 100644 src/humanloop/requests/version_deployment_response.py
 delete mode 100644 src/humanloop/requests/version_deployment_response_file.py
 delete mode 100644 src/humanloop/requests/version_id.py
 delete mode 100644 src/humanloop/requests/version_id_response.py
 delete mode 100644 src/humanloop/requests/version_id_response_version.py
 delete mode 100644 src/humanloop/requests/version_reference_response.py
 delete mode 100644 src/humanloop/requests/version_stats_response.py
 delete mode 100644 src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py
 delete mode 100644 src/humanloop/tools/__init__.py
 delete mode 100644 src/humanloop/tools/client.py
 delete mode 100644 src/humanloop/tools/raw_client.py
 delete mode 100644 src/humanloop/types/__init__.py
 delete mode 100644 src/humanloop/types/agent_call_response.py
 delete mode 100644 src/humanloop/types/agent_call_response_tool_choice.py
 delete mode 100644 src/humanloop/types/agent_call_stream_response.py
 delete mode 100644 src/humanloop/types/agent_call_stream_response_payload.py
 delete mode 100644 src/humanloop/types/agent_config_response.py
 delete mode 100644 src/humanloop/types/agent_continue_call_response.py
 delete mode 100644 src/humanloop/types/agent_continue_call_response_tool_choice.py
 delete mode 100644 src/humanloop/types/agent_continue_call_stream_response.py
 delete mode 100644 src/humanloop/types/agent_continue_call_stream_response_payload.py
 delete mode 100644 src/humanloop/types/agent_inline_tool.py
 delete mode 100644 src/humanloop/types/agent_kernel_request.py
 delete mode 100644 src/humanloop/types/agent_kernel_request_reasoning_effort.py
 delete mode 100644 src/humanloop/types/agent_kernel_request_stop.py
 delete mode 100644 src/humanloop/types/agent_kernel_request_template.py
 delete mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py
 delete mode 100644 src/humanloop/types/agent_linked_file_request.py
 delete mode 100644 src/humanloop/types/agent_linked_file_response.py
 delete mode 100644 src/humanloop/types/agent_linked_file_response_file.py
 delete mode 100644 src/humanloop/types/agent_log_response.py
 delete mode 100644 src/humanloop/types/agent_log_response_tool_choice.py
 delete mode 100644 src/humanloop/types/agent_log_stream_response.py
 delete mode 100644 src/humanloop/types/agent_response.py
 delete mode 100644 src/humanloop/types/agent_response_reasoning_effort.py
 delete mode 100644 src/humanloop/types/agent_response_stop.py
 delete mode 100644 src/humanloop/types/agent_response_template.py
 delete mode 100644 src/humanloop/types/agent_response_tools_item.py
 delete mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py
 delete mode 100644 src/humanloop/types/anthropic_thinking_content.py
 delete mode 100644 src/humanloop/types/base_models_user_response.py
 delete mode 100644 src/humanloop/types/boolean_evaluator_stats_response.py
 delete mode 100644 src/humanloop/types/chat_message.py
 delete mode 100644 src/humanloop/types/chat_message_content.py
 delete mode 100644 src/humanloop/types/chat_message_content_item.py
 delete mode 100644 src/humanloop/types/chat_message_thinking_item.py
 delete mode 100644 src/humanloop/types/chat_role.py
 delete mode 100644 src/humanloop/types/chat_tool_type.py
 delete mode 100644 src/humanloop/types/code_evaluator_request.py
 delete mode 100644 src/humanloop/types/config_tool_response.py
 delete mode 100644 src/humanloop/types/create_agent_log_response.py
 delete mode 100644 src/humanloop/types/create_datapoint_request.py
 delete mode 100644 src/humanloop/types/create_datapoint_request_target_value.py
 delete mode 100644 src/humanloop/types/create_evaluator_log_response.py
 delete mode 100644 src/humanloop/types/create_flow_log_response.py
 delete mode 100644 src/humanloop/types/create_prompt_log_response.py
 delete mode 100644 src/humanloop/types/create_tool_log_response.py
 delete mode 100644 src/humanloop/types/dashboard_configuration.py
 delete mode 100644 src/humanloop/types/datapoint_response.py
 delete mode 100644 src/humanloop/types/datapoint_response_target_value.py
 delete mode 100644 src/humanloop/types/dataset_response.py
 delete mode 100644 src/humanloop/types/datasets_request.py
 delete mode 100644 src/humanloop/types/directory_response.py
 delete mode 100644 src/humanloop/types/directory_with_parents_and_children_response.py
 delete mode 100644 src/humanloop/types/directory_with_parents_and_children_response_files_item.py
 delete mode 100644 src/humanloop/types/environment_response.py
 delete mode 100644 src/humanloop/types/environment_tag.py
 delete mode 100644 src/humanloop/types/evaluatee_request.py
 delete mode 100644 src/humanloop/types/evaluatee_response.py
 delete mode 100644 src/humanloop/types/evaluation_evaluator_response.py
 delete mode 100644 src/humanloop/types/evaluation_log_response.py
 delete mode 100644 src/humanloop/types/evaluation_response.py
 delete mode 100644 src/humanloop/types/evaluation_run_response.py
 delete mode 100644 src/humanloop/types/evaluation_runs_response.py
 delete mode 100644 src/humanloop/types/evaluation_stats.py
 delete mode 100644 src/humanloop/types/evaluation_status.py
 delete mode 100644 src/humanloop/types/evaluations_dataset_request.py
 delete mode 100644 src/humanloop/types/evaluations_request.py
 delete mode 100644 src/humanloop/types/evaluator_activation_deactivation_request.py
 delete mode 100644 src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py
 delete mode 100644 src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py
 delete mode 100644 src/humanloop/types/evaluator_aggregate.py
 delete mode 100644 src/humanloop/types/evaluator_arguments_type.py
 delete mode 100644 src/humanloop/types/evaluator_config_response.py
 delete mode 100644 src/humanloop/types/evaluator_file_id.py
 delete mode 100644 src/humanloop/types/evaluator_file_path.py
 delete mode 100644 src/humanloop/types/evaluator_judgment_number_limit.py
 delete mode 100644 src/humanloop/types/evaluator_judgment_option_response.py
 delete mode 100644 src/humanloop/types/evaluator_log_response.py
 delete mode 100644 src/humanloop/types/evaluator_log_response_judgment.py
 delete mode 100644 src/humanloop/types/evaluator_response.py
 delete mode 100644 src/humanloop/types/evaluator_response_spec.py
 delete mode 100644 src/humanloop/types/evaluator_return_type_enum.py
 delete mode 100644 src/humanloop/types/evaluator_version_id.py
 delete mode 100644 src/humanloop/types/evaluators_request.py
 delete mode 100644 src/humanloop/types/event_type.py
 delete mode 100644 src/humanloop/types/external_evaluator_request.py
 delete mode 100644 src/humanloop/types/feedback_type.py
 delete mode 100644 src/humanloop/types/file_environment_response.py
 delete mode 100644 src/humanloop/types/file_environment_response_file.py
 delete mode 100644 src/humanloop/types/file_environment_variable_request.py
 delete mode 100644 src/humanloop/types/file_id.py
 delete mode 100644 src/humanloop/types/file_path.py
 delete mode 100644 src/humanloop/types/file_request.py
 delete mode 100644 src/humanloop/types/file_sort_by.py
 delete mode 100644 src/humanloop/types/file_type.py
 delete mode 100644 src/humanloop/types/files_tool_type.py
 delete mode 100644 src/humanloop/types/flow_kernel_request.py
 delete mode 100644 src/humanloop/types/flow_log_response.py
 delete mode 100644 src/humanloop/types/flow_response.py
 delete mode 100644 src/humanloop/types/function_tool.py
 delete mode 100644 src/humanloop/types/function_tool_choice.py
 delete mode 100644 src/humanloop/types/http_validation_error.py
 delete mode 100644 src/humanloop/types/human_evaluator_request.py
 delete mode 100644 src/humanloop/types/human_evaluator_request_return_type.py
 delete mode 100644 src/humanloop/types/image_chat_content.py
 delete mode 100644 src/humanloop/types/image_url.py
 delete mode 100644 src/humanloop/types/image_url_detail.py
 delete mode 100644 src/humanloop/types/input_response.py
 delete mode 100644 src/humanloop/types/linked_file_request.py
 delete mode 100644 src/humanloop/types/linked_tool_response.py
 delete mode 100644 src/humanloop/types/list_agents.py
 delete mode 100644 src/humanloop/types/list_datasets.py
 delete mode 100644 src/humanloop/types/list_evaluators.py
 delete mode 100644 src/humanloop/types/list_flows.py
 delete mode 100644 src/humanloop/types/list_prompts.py
 delete mode 100644 src/humanloop/types/list_tools.py
 delete mode 100644 src/humanloop/types/llm_evaluator_request.py
 delete mode 100644 src/humanloop/types/log_response.py
 delete mode 100644 src/humanloop/types/log_status.py
 delete mode 100644 src/humanloop/types/log_stream_response.py
 delete mode 100644 src/humanloop/types/model_endpoints.py
 delete mode 100644 src/humanloop/types/model_providers.py
 delete mode 100644 src/humanloop/types/monitoring_evaluator_environment_request.py
 delete mode 100644 src/humanloop/types/monitoring_evaluator_response.py
 delete mode 100644 src/humanloop/types/monitoring_evaluator_state.py
 delete mode 100644 src/humanloop/types/monitoring_evaluator_version_request.py
 delete mode 100644 src/humanloop/types/numeric_evaluator_stats_response.py
 delete mode 100644 src/humanloop/types/observability_status.py
 delete mode 100644 src/humanloop/types/on_agent_call_enum.py
 delete mode 100644 src/humanloop/types/open_ai_reasoning_effort.py
 delete mode 100644 src/humanloop/types/overall_stats.py
 delete mode 100644 src/humanloop/types/paginated_data_agent_response.py
 delete mode 100644 src/humanloop/types/paginated_data_evaluation_log_response.py
 delete mode 100644 src/humanloop/types/paginated_data_evaluator_response.py
 delete mode 100644 src/humanloop/types/paginated_data_flow_response.py
 delete mode 100644 src/humanloop/types/paginated_data_log_response.py
 delete mode 100644 src/humanloop/types/paginated_data_prompt_response.py
 delete mode 100644 src/humanloop/types/paginated_data_tool_response.py
 delete mode 100644 src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
 delete mode 100644 src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
 delete mode 100644 src/humanloop/types/paginated_datapoint_response.py
 delete mode 100644 src/humanloop/types/paginated_dataset_response.py
 delete mode 100644 src/humanloop/types/paginated_evaluation_response.py
 delete mode 100644 src/humanloop/types/paginated_prompt_log_response.py
 delete mode 100644 src/humanloop/types/paginated_session_response.py
 delete mode 100644 src/humanloop/types/platform_access_enum.py
 delete mode 100644 src/humanloop/types/populate_template_response.py
 delete mode 100644 src/humanloop/types/populate_template_response_populated_template.py
 delete mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py
 delete mode 100644 src/humanloop/types/populate_template_response_stop.py
 delete mode 100644 src/humanloop/types/populate_template_response_template.py
 delete mode 100644 src/humanloop/types/prompt_call_log_response.py
 delete mode 100644 src/humanloop/types/prompt_call_response.py
 delete mode 100644 src/humanloop/types/prompt_call_response_tool_choice.py
 delete mode 100644 src/humanloop/types/prompt_call_stream_response.py
 delete mode 100644 src/humanloop/types/prompt_kernel_request.py
 delete mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py
 delete mode 100644 src/humanloop/types/prompt_kernel_request_stop.py
 delete mode 100644 src/humanloop/types/prompt_kernel_request_template.py
 delete mode 100644 src/humanloop/types/prompt_log_response.py
 delete mode 100644 src/humanloop/types/prompt_log_response_tool_choice.py
 delete mode 100644 src/humanloop/types/prompt_response.py
 delete mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py
 delete mode 100644 src/humanloop/types/prompt_response_stop.py
 delete mode 100644 src/humanloop/types/prompt_response_template.py
 delete mode 100644 src/humanloop/types/provider_api_keys.py
 delete mode 100644 src/humanloop/types/response_format.py
 delete mode 100644 src/humanloop/types/response_format_type.py
 delete mode 100644 src/humanloop/types/run_stats_response.py
 delete mode 100644 src/humanloop/types/run_stats_response_evaluator_stats_item.py
 delete mode 100644 src/humanloop/types/run_version_response.py
 delete mode 100644 src/humanloop/types/select_evaluator_stats_response.py
 delete mode 100644 src/humanloop/types/sort_order.py
 delete mode 100644 src/humanloop/types/template_language.py
 delete mode 100644 src/humanloop/types/text_chat_content.py
 delete mode 100644 src/humanloop/types/text_evaluator_stats_response.py
 delete mode 100644 src/humanloop/types/time_unit.py
 delete mode 100644 src/humanloop/types/tool_call.py
 delete mode 100644 src/humanloop/types/tool_call_response.py
 delete mode 100644 src/humanloop/types/tool_choice.py
 delete mode 100644 src/humanloop/types/tool_function.py
 delete mode 100644 src/humanloop/types/tool_kernel_request.py
 delete mode 100644 src/humanloop/types/tool_log_response.py
 delete mode 100644 src/humanloop/types/tool_response.py
 delete mode 100644 src/humanloop/types/update_dateset_action.py
 delete mode 100644 src/humanloop/types/update_evaluation_status_request.py
 delete mode 100644 src/humanloop/types/update_version_request.py
 delete mode 100644 src/humanloop/types/user_response.py
 delete mode 100644 src/humanloop/types/valence.py
 delete mode 100644 src/humanloop/types/validation_error.py
 delete mode 100644 src/humanloop/types/validation_error_loc_item.py
 delete mode 100644 src/humanloop/types/version_deployment_response.py
 delete mode 100644 src/humanloop/types/version_deployment_response_file.py
 delete mode 100644 src/humanloop/types/version_id.py
 delete mode 100644 src/humanloop/types/version_id_response.py
 delete mode 100644 src/humanloop/types/version_id_response_version.py
 delete mode 100644 src/humanloop/types/version_reference_response.py
 delete mode 100644 src/humanloop/types/version_stats_response.py
 delete mode 100644 src/humanloop/types/version_stats_response_evaluator_version_stats_item.py
 delete mode 100644 src/humanloop/types/version_status.py
 delete mode 100644 src/humanloop/version.py

diff --git a/poetry.lock b/poetry.lock
index 7cb0fb5c..016c7485 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -974,13 +974,13 @@ wrapt = ">=1.0.0,<2.0.0"
 
 [[package]]
 name = "opentelemetry-instrumentation-anthropic"
-version = "0.40.4"
+version = "0.40.5"
 description = "OpenTelemetry Anthropic instrumentation"
 optional = false
 python-versions = "<4,>=3.9"
 files = [
-    {file = "opentelemetry_instrumentation_anthropic-0.40.4-py3-none-any.whl", hash = "sha256:75045e9e89ca0bde1a6f31726c4ab2e8098bc3382ad1ebb28deff2f8fc098fe0"},
-    {file = "opentelemetry_instrumentation_anthropic-0.40.4.tar.gz", hash = "sha256:d45579fad546043fd6d55677297dd2924bf6c40400dcc63b3eb2ca652ae4e0fd"},
+    {file = "opentelemetry_instrumentation_anthropic-0.40.5-py3-none-any.whl", hash = "sha256:d3b203b0ee8ee06149711d7acfa0085ad44f1841d709fd4d639934b9d8aa87b4"},
+    {file = "opentelemetry_instrumentation_anthropic-0.40.5.tar.gz", hash = "sha256:5a7e9b3852cd8cfc43e50450d83d40b1a15d12b295fb18d2c814576072fcc23f"},
 ]
 
 [package.dependencies]
@@ -991,13 +991,13 @@ opentelemetry-semantic-conventions-ai = "0.4.8"
 
 [[package]]
 name = "opentelemetry-instrumentation-bedrock"
-version = "0.40.4"
+version = "0.40.5"
 description = "OpenTelemetry Bedrock instrumentation"
 optional = false
 python-versions = "<4,>=3.9"
 files = [
-    {file = "opentelemetry_instrumentation_bedrock-0.40.4-py3-none-any.whl", hash = "sha256:6cdc03e6e1a9747265bcf4926da9e226ee4b3feda54d69ef5cadb56e19954bf8"},
-    {file = "opentelemetry_instrumentation_bedrock-0.40.4.tar.gz", hash = "sha256:0db189f33935df7907418d6439cda11d1542e00e4c628744eef816d74510b3b9"},
+    {file = "opentelemetry_instrumentation_bedrock-0.40.5-py3-none-any.whl", hash = "sha256:5f3cf77c03dfa4ab04fc202bf91a98688053ecb7d1bc64b2eaf42c866fb77c69"},
+    {file = "opentelemetry_instrumentation_bedrock-0.40.5.tar.gz", hash = "sha256:0ab690501101a67cff3c4037fa6bdfeb65d25d9ec365e97880ebe118a0a6dd30"},
 ]
 
 [package.dependencies]
@@ -1010,13 +1010,13 @@ tokenizers = ">=0.13.0"
 
 [[package]]
 name = "opentelemetry-instrumentation-cohere"
-version = "0.40.4"
+version = "0.40.5"
 description = "OpenTelemetry Cohere instrumentation"
 optional = false
 python-versions = "<4,>=3.9"
 files = [
-    {file = "opentelemetry_instrumentation_cohere-0.40.4-py3-none-any.whl", hash = "sha256:aca41ad80a10c604d73c8167a2e26bfeb7463b04c634951fa30fcb69c3648456"},
-    {file = "opentelemetry_instrumentation_cohere-0.40.4.tar.gz", hash = "sha256:c4ac17c6d32f772da06cd9ee51e5da546dae4b2b9aa3114ff69b822e2997bbd4"},
+    {file = "opentelemetry_instrumentation_cohere-0.40.5-py3-none-any.whl", hash = "sha256:ff73b4eed87f1d79e737351aabec85fde3aaa2f793c87e508c20498ccbea3d48"},
+    {file = "opentelemetry_instrumentation_cohere-0.40.5.tar.gz", hash = "sha256:98332b9bea8b9c84222682a57ebb431e693ab1d548a4a3f4301a9b1dfc3a6cbc"},
 ]
 
 [package.dependencies]
@@ -1027,13 +1027,13 @@ opentelemetry-semantic-conventions-ai = "0.4.8"
 
 [[package]]
 name = "opentelemetry-instrumentation-groq"
-version = "0.40.4"
+version = "0.40.5"
 description = "OpenTelemetry Groq instrumentation"
 optional = false
 python-versions = "<4,>=3.9"
 files = [
-    {file = "opentelemetry_instrumentation_groq-0.40.4-py3-none-any.whl", hash = "sha256:ca27464ab8365220685a34d6531611c5ce5087a222e0d751543ca6bee226c354"},
"sha256:83f3834148fd21062943f3e44295bae4bcdde6ccfc5331cc94843a41849d0131"}, + {file = "opentelemetry_instrumentation_groq-0.40.5-py3-none-any.whl", hash = "sha256:dec20e7f50f648068b2cfb730da0e2297dbdc9a93840cbbc595f652cd0e9e94b"}, + {file = "opentelemetry_instrumentation_groq-0.40.5.tar.gz", hash = "sha256:036bda3c9317a3d34c7538479864d215e0f4e147b5fe475be4f1cd4402b2ae30"}, ] [package.dependencies] @@ -1044,13 +1044,13 @@ opentelemetry-semantic-conventions-ai = "0.4.8" [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.40.4" +version = "0.40.5" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_openai-0.40.4-py3-none-any.whl", hash = "sha256:39f8cc5a149cb5d9202a307280d8aca3eca703c2c689659fb45f256825c0cc55"}, - {file = "opentelemetry_instrumentation_openai-0.40.4.tar.gz", hash = "sha256:9a877f57967a068464bfb87ba38e8d34bcbd88483b4a3229e2876cd8334d0ad7"}, + {file = "opentelemetry_instrumentation_openai-0.40.5-py3-none-any.whl", hash = "sha256:533cbfc00a6d629998c5adb49e37a2165559a5d26bb6bb6a61f768bf23e96cf9"}, + {file = "opentelemetry_instrumentation_openai-0.40.5.tar.gz", hash = "sha256:691d9e7bca55b5a21538c86127ee5af05b033385212aaeb64eab2dd383cb815b"}, ] [package.dependencies] @@ -1062,13 +1062,13 @@ tiktoken = ">=0.6.0,<1" [[package]] name = "opentelemetry-instrumentation-replicate" -version = "0.40.4" +version = "0.40.5" description = "OpenTelemetry Replicate instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_replicate-0.40.4-py3-none-any.whl", hash = "sha256:1d1f606df50605fef00f5e14c8d64a008b181da15636d8a1b82f7b96294a616d"}, - {file = "opentelemetry_instrumentation_replicate-0.40.4.tar.gz", hash = "sha256:b9e16cab539e8799ffedbe7c7ed8f01a32ffe4a4e2ce4bb19ea03ad499e14f6d"}, + {file = "opentelemetry_instrumentation_replicate-0.40.5-py3-none-any.whl", hash = "sha256:948ecea48de37639433a64cc36bbb5f61f1de24122cafcf3101d5c4c113b7a82"}, + {file = "opentelemetry_instrumentation_replicate-0.40.5.tar.gz", hash = "sha256:d5d70375619ed286c80f25631ac1ab1cbe58146dcf90efa92203c7c93b8d5b6c"}, ] [package.dependencies] @@ -1274,18 +1274,18 @@ testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] +testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "protobuf" @@ -1830,125 +1830,125 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rpds-py" -version = "0.24.0" +version = "0.25.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" files = [ - {file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"}, - {file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"}, - {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8acd55bd5b071156bae57b555f5d33697998752673b9de554dd82f5b5352727"}, - {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7e80d375134ddb04231a53800503752093dbb65dad8dabacce2c84cccc78e964"}, - {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60748789e028d2a46fc1c70750454f83c6bdd0d05db50f5ae83e2db500b34da5"}, - {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e1daf5bf6c2be39654beae83ee6b9a12347cb5aced9a29eecf12a2d25fff664"}, - {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b221c2457d92a1fb3c97bee9095c874144d196f47c038462ae6e4a14436f7bc"}, - {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:66420986c9afff67ef0c5d1e4cdc2d0e5262f53ad11e4f90e5e22448df485bf0"}, - {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:43dba99f00f1d37b2a0265a259592d05fcc8e7c19d140fe51c6e6f16faabeb1f"}, - {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a88c0d17d039333a41d9bf4616bd062f0bd7aa0edeb6cafe00a2fc2a804e944f"}, - {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc31e13ce212e14a539d430428cd365e74f8b2d534f8bc22dd4c9c55b277b875"}, - {file = "rpds_py-0.24.0-cp310-cp310-win32.whl", hash = "sha256:fc2c1e1b00f88317d9de6b2c2b39b012ebbfe35fe5e7bef980fd2a91f6100a07"}, - {file = "rpds_py-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:c0145295ca415668420ad142ee42189f78d27af806fcf1f32a18e51d47dd2052"}, - {file = "rpds_py-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2d3ee4615df36ab8eb16c2507b11e764dcc11fd350bbf4da16d09cda11fcedef"}, - {file = "rpds_py-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e13ae74a8a3a0c2f22f450f773e35f893484fcfacb00bb4344a7e0f4f48e1f97"}, - {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf86f72d705fc2ef776bb7dd9e5fbba79d7e1f3e258bf9377f8204ad0fc1c51e"}, - {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c43583ea8517ed2e780a345dd9960896afc1327e8cf3ac8239c167530397440d"}, - {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cd031e63bc5f05bdcda120646a0d32f6d729486d0067f09d79c8db5368f4586"}, - {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:34d90ad8c045df9a4259c47d2e16a3f21fdb396665c94520dbfe8766e62187a4"}, - {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e838bf2bb0b91ee67bf2b889a1a841e5ecac06dd7a2b1ef4e6151e2ce155c7ae"}, - {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04ecf5c1ff4d589987b4d9882872f80ba13da7d42427234fce8f22efb43133bc"}, - {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:630d3d8ea77eabd6cbcd2ea712e1c5cecb5b558d39547ac988351195db433f6c"}, - {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ebcb786b9ff30b994d5969213a8430cbb984cdd7ea9fd6df06663194bd3c450c"}, - 
{file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:174e46569968ddbbeb8a806d9922f17cd2b524aa753b468f35b97ff9c19cb718"}, - {file = "rpds_py-0.24.0-cp311-cp311-win32.whl", hash = "sha256:5ef877fa3bbfb40b388a5ae1cb00636a624690dcb9a29a65267054c9ea86d88a"}, - {file = "rpds_py-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:e274f62cbd274359eff63e5c7e7274c913e8e09620f6a57aae66744b3df046d6"}, - {file = "rpds_py-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d8551e733626afec514b5d15befabea0dd70a343a9f23322860c4f16a9430205"}, - {file = "rpds_py-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0e374c0ce0ca82e5b67cd61fb964077d40ec177dd2c4eda67dba130de09085c7"}, - {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d69d003296df4840bd445a5d15fa5b6ff6ac40496f956a221c4d1f6f7b4bc4d9"}, - {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8212ff58ac6dfde49946bea57474a386cca3f7706fc72c25b772b9ca4af6b79e"}, - {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:528927e63a70b4d5f3f5ccc1fa988a35456eb5d15f804d276709c33fc2f19bda"}, - {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a824d2c7a703ba6daaca848f9c3d5cb93af0505be505de70e7e66829affd676e"}, - {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d51febb7a114293ffd56c6cf4736cb31cd68c0fddd6aa303ed09ea5a48e029"}, - {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3fab5f4a2c64a8fb64fc13b3d139848817a64d467dd6ed60dcdd6b479e7febc9"}, - {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9be4f99bee42ac107870c61dfdb294d912bf81c3c6d45538aad7aecab468b6b7"}, - {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:564c96b6076a98215af52f55efa90d8419cc2ef45d99e314fddefe816bc24f91"}, - {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:75a810b7664c17f24bf2ffd7f92416c00ec84b49bb68e6a0d93e542406336b56"}, - {file = "rpds_py-0.24.0-cp312-cp312-win32.whl", hash = "sha256:f6016bd950be4dcd047b7475fdf55fb1e1f59fc7403f387be0e8123e4a576d30"}, - {file = "rpds_py-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:998c01b8e71cf051c28f5d6f1187abbdf5cf45fc0efce5da6c06447cba997034"}, - {file = "rpds_py-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2d8e4508e15fc05b31285c4b00ddf2e0eb94259c2dc896771966a163122a0c"}, - {file = "rpds_py-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0f00c16e089282ad68a3820fd0c831c35d3194b7cdc31d6e469511d9bffc535c"}, - {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951cc481c0c395c4a08639a469d53b7d4afa252529a085418b82a6b43c45c240"}, - {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9ca89938dff18828a328af41ffdf3902405a19f4131c88e22e776a8e228c5a8"}, - {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed0ef550042a8dbcd657dfb284a8ee00f0ba269d3f2286b0493b15a5694f9fe8"}, - {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b2356688e5d958c4d5cb964af865bea84db29971d3e563fb78e46e20fe1848b"}, - {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:78884d155fd15d9f64f5d6124b486f3d3f7fd7cd71a78e9670a0f6f6ca06fb2d"}, - {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6a4a535013aeeef13c5532f802708cecae8d66c282babb5cd916379b72110cf7"}, - {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:84e0566f15cf4d769dade9b366b7b87c959be472c92dffb70462dd0844d7cbad"}, - {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:823e74ab6fbaa028ec89615ff6acb409e90ff45580c45920d4dfdddb069f2120"}, - {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c61a2cb0085c8783906b2f8b1f16a7e65777823c7f4d0a6aaffe26dc0d358dd9"}, - {file = "rpds_py-0.24.0-cp313-cp313-win32.whl", hash = "sha256:60d9b630c8025b9458a9d114e3af579a2c54bd32df601c4581bd054e85258143"}, - {file = "rpds_py-0.24.0-cp313-cp313-win_amd64.whl", hash = "sha256:6eea559077d29486c68218178ea946263b87f1c41ae7f996b1f30a983c476a5a"}, - {file = "rpds_py-0.24.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:d09dc82af2d3c17e7dd17120b202a79b578d79f2b5424bda209d9966efeed114"}, - {file = "rpds_py-0.24.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5fc13b44de6419d1e7a7e592a4885b323fbc2f46e1f22151e3a8ed3b8b920405"}, - {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c347a20d79cedc0a7bd51c4d4b7dbc613ca4e65a756b5c3e57ec84bd43505b47"}, - {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20f2712bd1cc26a3cc16c5a1bfee9ed1abc33d4cdf1aabd297fe0eb724df4272"}, - {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad911555286884be1e427ef0dc0ba3929e6821cbeca2194b13dc415a462c7fd"}, - {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0aeb3329c1721c43c58cae274d7d2ca85c1690d89485d9c63a006cb79a85771a"}, - {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a0f156e9509cee987283abd2296ec816225145a13ed0391df8f71bf1d789e2d"}, - {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa6800adc8204ce898c8a424303969b7aa6a5e4ad2789c13f8648739830323b7"}, - {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a18fc371e900a21d7392517c6f60fe859e802547309e94313cd8181ad9db004d"}, - {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9168764133fd919f8dcca2ead66de0105f4ef5659cbb4fa044f7014bed9a1797"}, - {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f6e3cec44ba05ee5cbdebe92d052f69b63ae792e7d05f1020ac5e964394080c"}, - {file = "rpds_py-0.24.0-cp313-cp313t-win32.whl", hash = "sha256:8ebc7e65ca4b111d928b669713865f021b7773350eeac4a31d3e70144297baba"}, - {file = "rpds_py-0.24.0-cp313-cp313t-win_amd64.whl", hash = "sha256:675269d407a257b8c00a6b58205b72eec8231656506c56fd429d924ca00bb350"}, - {file = "rpds_py-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a36b452abbf29f68527cf52e181fced56685731c86b52e852053e38d8b60bc8d"}, - {file = "rpds_py-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b3b397eefecec8e8e39fa65c630ef70a24b09141a6f9fc17b3c3a50bed6b50e"}, - {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdabcd3beb2a6dca7027007473d8ef1c3b053347c76f685f5f060a00327b8b65"}, - {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:5db385bacd0c43f24be92b60c857cf760b7f10d8234f4bd4be67b5b20a7c0b6b"}, - {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8097b3422d020ff1c44effc40ae58e67d93e60d540a65649d2cdaf9466030791"}, - {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493fe54318bed7d124ce272fc36adbf59d46729659b2c792e87c3b95649cdee9"}, - {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8aa362811ccdc1f8dadcc916c6d47e554169ab79559319ae9fae7d7752d0d60c"}, - {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d8f9a6e7fd5434817526815f09ea27f2746c4a51ee11bb3439065f5fc754db58"}, - {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8205ee14463248d3349131bb8099efe15cd3ce83b8ef3ace63c7e976998e7124"}, - {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:921ae54f9ecba3b6325df425cf72c074cd469dea843fb5743a26ca7fb2ccb149"}, - {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32bab0a56eac685828e00cc2f5d1200c548f8bc11f2e44abf311d6b548ce2e45"}, - {file = "rpds_py-0.24.0-cp39-cp39-win32.whl", hash = "sha256:f5c0ed12926dec1dfe7d645333ea59cf93f4d07750986a586f511c0bc61fe103"}, - {file = "rpds_py-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:afc6e35f344490faa8276b5f2f7cbf71f88bc2cda4328e00553bd451728c571f"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:619ca56a5468f933d940e1bf431c6f4e13bef8e688698b067ae68eb4f9b30e3a"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b28e5122829181de1898c2c97f81c0b3246d49f585f22743a1246420bb8d399"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e5ab32cf9eb3647450bc74eb201b27c185d3857276162c101c0f8c6374e098"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:208b3a70a98cf3710e97cabdc308a51cd4f28aa6e7bb11de3d56cd8b74bab98d"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbc4362e06f950c62cad3d4abf1191021b2ffaf0b31ac230fbf0526453eee75e"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebea2821cdb5f9fef44933617be76185b80150632736f3d76e54829ab4a3b4d1"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4df06c35465ef4d81799999bba810c68d29972bf1c31db61bfdb81dd9d5bb"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3aa13bdf38630da298f2e0d77aca967b200b8cc1473ea05248f6c5e9c9bdb44"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:041f00419e1da7a03c46042453598479f45be3d787eb837af382bfc169c0db33"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:d8754d872a5dfc3c5bf9c0e059e8107451364a30d9fd50f1f1a85c4fb9481164"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:896c41007931217a343eff197c34513c154267636c8056fb409eafd494c3dcdc"}, - {file = "rpds_py-0.24.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:92558d37d872e808944c3c96d0423b8604879a3d1c86fdad508d7ed91ea547d5"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:f9e0057a509e096e47c87f753136c9b10d7a91842d8042c2ee6866899a717c0d"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d6e109a454412ab82979c5b1b3aee0604eca4bbf9a02693bb9df027af2bfa91a"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc1c892b1ec1f8cbd5da8de287577b455e388d9c328ad592eabbdcb6fc93bee5"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9c39438c55983d48f4bb3487734d040e22dad200dab22c41e331cee145e7a50d"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d7e8ce990ae17dda686f7e82fd41a055c668e13ddcf058e7fb5e9da20b57793"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ea7f4174d2e4194289cb0c4e172d83e79a6404297ff95f2875cf9ac9bced8ba"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb2954155bb8f63bb19d56d80e5e5320b61d71084617ed89efedb861a684baea"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04f2b712a2206e13800a8136b07aaedc23af3facab84918e7aa89e4be0260032"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:eda5c1e2a715a4cbbca2d6d304988460942551e4e5e3b7457b50943cd741626d"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:9abc80fe8c1f87218db116016de575a7998ab1629078c90840e8d11ab423ee25"}, - {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6a727fd083009bc83eb83d6950f0c32b3c94c8b80a9b667c87f4bd1274ca30ba"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e0f3ef95795efcd3b2ec3fe0a5bcfb5dadf5e3996ea2117427e524d4fbf309c6"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2c13777ecdbbba2077670285dd1fe50828c8742f6a4119dbef6f83ea13ad10fb"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e8d804c2ccd618417e96720ad5cd076a86fa3f8cb310ea386a3e6229bae7d1"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd822f019ccccd75c832deb7aa040bb02d70a92eb15a2f16c7987b7ad4ee8d83"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0047638c3aa0dbcd0ab99ed1e549bbf0e142c9ecc173b6492868432d8989a046"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5b66d1b201cc71bc3081bc2f1fc36b0c1f268b773e03bbc39066651b9e18391"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbcbb6db5582ea33ce46a5d20a5793134b5365110d84df4e30b9d37c6fd40ad3"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63981feca3f110ed132fd217bf7768ee8ed738a55549883628ee3da75bb9cb78"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:3a55fc10fdcbf1a4bd3c018eea422c52cf08700cf99c28b5cb10fe97ab77a0d3"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:c30ff468163a48535ee7e9bf21bd14c7a81147c0e58a36c1078289a8ca7af0bd"}, - {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:369d9c6d4c714e36d4a03957b4783217a3ccd1e222cdd67d464a3a479fc17796"}, - 
{file = "rpds_py-0.24.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:24795c099453e3721fda5d8ddd45f5dfcc8e5a547ce7b8e9da06fecc3832e26f"}, - {file = "rpds_py-0.24.0.tar.gz", hash = "sha256:772cc1b2cd963e7e17e6cc55fe0371fb9c704d63e44cacec7b9b7f523b78919e"}, + {file = "rpds_py-0.25.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c146a24a8f0dc4a7846fb4640b88b3a68986585b8ce8397af15e66b7c5817439"}, + {file = "rpds_py-0.25.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:77814c7a4e1dc43fba73aeb4c1ef0fe37d901f3aa869a4823de5ea843a283fd0"}, + {file = "rpds_py-0.25.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5afbff2822016db3c696cb0c1432e6b1f0e34aa9280bc5184dc216812a24e70d"}, + {file = "rpds_py-0.25.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ffae52cd76837a5c16409359d236b1fced79e42e0792e8adf375095a5e855368"}, + {file = "rpds_py-0.25.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddf9426b740a7047b2b0dddcba775211542e8053ce1e509a1759b665fe573508"}, + {file = "rpds_py-0.25.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cad834f1a8f51eb037c3c4dc72c884c9e1e0644d900e2d45aa76450e4aa6282"}, + {file = "rpds_py-0.25.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c46bd76986e05689376d28fdc2b97d899576ce3e3aaa5a5f80f67a8300b26eb3"}, + {file = "rpds_py-0.25.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f3353a2d7eb7d5e0af8a7ca9fc85a34ba12619119bcdee6b8a28a6373cda65ce"}, + {file = "rpds_py-0.25.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fdc648d4e81eef5ac4bb35d731562dffc28358948410f3274d123320e125d613"}, + {file = "rpds_py-0.25.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:098d446d76d26e394b440d73921b49c1c90274d46ccbaadf346b1b78f9fdd4b1"}, + {file = "rpds_py-0.25.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c624c82e645f6b5465d08cdc802fb0cd53aa1478782fb2992b9e09f2c9426865"}, + {file = "rpds_py-0.25.0-cp310-cp310-win32.whl", hash = "sha256:9d0041bd9e2d2ef803b32d84a0c8115d178132da5691346465953a2a966ba8ca"}, + {file = "rpds_py-0.25.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8b41195a6b03280ab00749a438fbce761e7acfd5381051a570239d752376f27"}, + {file = "rpds_py-0.25.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6587ece9f205097c62d0e3d3cb7c06991eb0083ab6a9cf48951ec49c2ab7183c"}, + {file = "rpds_py-0.25.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b0a5651e350997cebcdc23016dca26c4d1993d29015a535284da3159796e30b6"}, + {file = "rpds_py-0.25.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3752a015db89ea3e9c04d5e185549be4aa29c1882150e094c614c0de8e788feb"}, + {file = "rpds_py-0.25.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a05b199c11d2f39c72de8c30668734b5d20974ad44b65324ea3e647a211f135d"}, + {file = "rpds_py-0.25.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2f91902fc0c95dd1fa6b30ebd2af83ace91e592f7fd6340a375588a9d4b9341b"}, + {file = "rpds_py-0.25.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98c729193e7abe498565266933c125780fb646e977e94289cadbb36e4eeeb370"}, + {file = "rpds_py-0.25.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36a7564deaac3f372e8b8b701eb982ea3113516e8e08cd87e3dc6ccf29bad14b"}, + {file = "rpds_py-0.25.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:6b0c0f671a53c129ea48f9481e95532579cc489ab5a0ffe750c9020787181c48"}, + {file = "rpds_py-0.25.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d21408eaa157063f56e58ca50da27cad67c4395a85fb44cc7a31253ea4e58918"}, + {file = "rpds_py-0.25.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a413674eb2bd2ecb2b93fcc928871b19f7220ee04bca4af3375c50a2b32b5a50"}, + {file = "rpds_py-0.25.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:94f89161a3e358db33310a8a064852a6eb119ed1aa1a3dba927b4e5140e65d00"}, + {file = "rpds_py-0.25.0-cp311-cp311-win32.whl", hash = "sha256:540cd89d256119845b7f8f56c4bb80cad280cab92d9ca473be49ea13e678fd44"}, + {file = "rpds_py-0.25.0-cp311-cp311-win_amd64.whl", hash = "sha256:2649ff19291928243f90c86e4dc9cd86c8c4c6a73c3693ba2e23bc2fbcd8338c"}, + {file = "rpds_py-0.25.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:89260601d497fa5957c3e46f10b16cfa2a4808ad4dd46cddc0b997461923a7d9"}, + {file = "rpds_py-0.25.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:637ec39f97e342a3f76af739eda96800549d92f3aa27a2170b6dcbdffd49f480"}, + {file = "rpds_py-0.25.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bd08c82336412a39a598e5baccab2ee2d7bd54e9115c8b64f2febb45da5c368"}, + {file = "rpds_py-0.25.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:837fd066f974e5b98c69ac83ec594b79a2724a39a92a157b8651615e5032e530"}, + {file = "rpds_py-0.25.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:653a066d2a4a332d4f8a11813e8124b643fa7b835b78468087a9898140469eee"}, + {file = "rpds_py-0.25.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91a51499be506022b9f09facfc42f0c3a1c45969c0fc8f0bbebc8ff23ab9e531"}, + {file = "rpds_py-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb91471640390a82744b164f8a0be4d7c89d173b1170713f9639c6bad61e9e64"}, + {file = "rpds_py-0.25.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28bd2969445acc2d6801a22f97a43134ae3cb18e7495d668bfaa8d82b8526cdc"}, + {file = "rpds_py-0.25.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f933b35fa563f047896a70b69414dfb3952831817e4c4b3a6faa96737627f363"}, + {file = "rpds_py-0.25.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:80b37b37525492250adc7cbca20ae7084f86eb3eb62414b624d2a400370853b1"}, + {file = "rpds_py-0.25.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:864573b6440b770db5a8693547a8728d7fd32580d4903010a8eee0bb5b03b130"}, + {file = "rpds_py-0.25.0-cp312-cp312-win32.whl", hash = "sha256:ad4a896896346adab86d52b31163c39d49e4e94c829494b96cc064bff82c5851"}, + {file = "rpds_py-0.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:4fbec54cc42fa90ca69158d75f125febc4116b2d934e71c78f97de1388a8feb2"}, + {file = "rpds_py-0.25.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4e5fe366fa53bd6777cf5440245366705338587b2cf8d61348ddaad744eb591a"}, + {file = "rpds_py-0.25.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:54f925ff8d4443b7cae23a5215954abbf4736a3404188bde53c4d744ac001d89"}, + {file = "rpds_py-0.25.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d58258a66255b2500ddaa4f33191ada5ec983a429c09eb151daf81efbb9aa115"}, + {file = "rpds_py-0.25.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f3a57f08c558d0983a708bfe6d1265f47b5debff9b366b2f2091690fada055c"}, + {file = "rpds_py-0.25.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b7d60d42f1b9571341ad2322e748f7a60f9847546cd801a3a0eb72a1b54c6519"}, + {file = "rpds_py-0.25.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a54b94b0e4de95aa92618906fb631779d9fde29b4bf659f482c354a3a79fd025"}, + {file = "rpds_py-0.25.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af1c2241919304cc2f90e7dcb3eb1c1df6fb4172dd338e629dd6410e48b3d1a0"}, + {file = "rpds_py-0.25.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7d34547810bfd61acf8a441e8a3651e7a919e8e8aed29850be14a1b05cfc6f41"}, + {file = "rpds_py-0.25.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66568caacf18542f0cf213db7adf3de2da6ad58c7bf2c4fafec0d81ae557443b"}, + {file = "rpds_py-0.25.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e49e4c3e899c32884d7828c91d6c3aff08d2f18857f50f86cc91187c31a4ca58"}, + {file = "rpds_py-0.25.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:20af08b0b2d5b196a2bcb70becf0b97ec5af579cee0ae6750b08a2eea3b6c77d"}, + {file = "rpds_py-0.25.0-cp313-cp313-win32.whl", hash = "sha256:d3dc8d6ce8f001c80919bdb49d8b0b815185933a0b8e9cdeaea42b0b6f27eeb0"}, + {file = "rpds_py-0.25.0-cp313-cp313-win_amd64.whl", hash = "sha256:113d134dc5a8d2503630ca2707b58a1bf5b1b3c69b35c7dab8690ee650c111b8"}, + {file = "rpds_py-0.25.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:6c72a4a8fab10bc96720ad40941bb471e3b1150fb8d62dab205d495511206cf1"}, + {file = "rpds_py-0.25.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:bb979162323f3534dce84b59f86e689a0761a2a300e0212bfaedfa80d4eb8100"}, + {file = "rpds_py-0.25.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35c8cb5dcf7d36d3adf2ae0730b60fb550a8feb6e432bee7ef84162a0d15714b"}, + {file = "rpds_py-0.25.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:673ba018df5ae5e7b6c9a021d51ffe39c0ae1daa0041611ed27a0bca634b2d2e"}, + {file = "rpds_py-0.25.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16fb28d3a653f67c871a47c5ca0be17bce9fab8adb8bcf7bd09f3771b8c4d860"}, + {file = "rpds_py-0.25.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12a84c3851f9e68633d883c01347db3cb87e6160120a489f9c47162cd276b0a5"}, + {file = "rpds_py-0.25.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b5f457afffb45d3804728a54083e31fbaf460e902e3f7d063e56d0d0814301e"}, + {file = "rpds_py-0.25.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9442cbff21122e9a529b942811007d65eabe4182e7342d102caf119b229322c6"}, + {file = "rpds_py-0.25.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:383cf0d4288baf5a16812ed70d54ecb7f2064e255eb7fe42c38e926adeae4534"}, + {file = "rpds_py-0.25.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0dcdee07ebf76223092666c72a9552db276fbe46b98830ecd1bb836cc98adc81"}, + {file = "rpds_py-0.25.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5bbfbd9c74c4dd74815bd532bf29bedea6d27d38f35ef46f9754172a14e4c655"}, + {file = "rpds_py-0.25.0-cp313-cp313t-win32.whl", hash = "sha256:90dbd2c42cb6463c07020695800ae8f347e7dbeff09da2975a988e467b624539"}, + {file = "rpds_py-0.25.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8c2ad59c4342a176cb3e0d5753e1c911eabc95c210fc6d0e913c32bf560bf012"}, + {file = "rpds_py-0.25.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9f9a1b15b875160186177f659cde2b0f899182b0aca49457d6396afc4bbda7b9"}, + {file = "rpds_py-0.25.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:5e849315963eb08c26167d0f2c0f9319c9bd379daea75092b3c595d70be6209d"}, + {file = "rpds_py-0.25.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ad37c29adc435e6d8b24be86b03596183ee8d4bb8580cc4c676879b0b896a99"}, + {file = "rpds_py-0.25.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:587cad3959d3d85127cf5df1624cdce569bb3796372e00420baad46af7c56b9b"}, + {file = "rpds_py-0.25.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce0518667855a1598d9b1f4fcf0fed1182c67c5ba4fe6a2c6bce93440a65cead"}, + {file = "rpds_py-0.25.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c18cb2f6805861dcdf11fb0b3c111a0335f6475411687db2f6636f32bed66b0"}, + {file = "rpds_py-0.25.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a21f4584f69547ae03aaa21be98753e85599f3437b84039da5dc20b53abe987"}, + {file = "rpds_py-0.25.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3d7d65aa934899849628137ab391562cdb487c6ffb9b9781319a64a9c66afbce"}, + {file = "rpds_py-0.25.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fd9167e9604cb5a218a2e847aa8cdc5f98b379a673371978ee7b0c11b4d2e140"}, + {file = "rpds_py-0.25.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6c27156c8d836e7ff760767e93245b286ae028bfd81d305db676662d1f642637"}, + {file = "rpds_py-0.25.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:66087711faf29cb3ac8ab05341939aec29968626aff8ef18e483e229055dd9a7"}, + {file = "rpds_py-0.25.0-cp39-cp39-win32.whl", hash = "sha256:f2e69415e4e33cdeee50ebc2c4d8fcbef12c3181d9274e512ccd2a905a76aad1"}, + {file = "rpds_py-0.25.0-cp39-cp39-win_amd64.whl", hash = "sha256:58cfaa54752d6d2b4f10e87571688dbb7792327a69eca5417373d77d42787058"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57e9616a2a9da08fe0994e37a0c6f578fbaf6d35911bcba31e99660542d60c45"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6d95521901896a90a858993bfa3ec0f9160d3d97e8c8fefc279b3306cdadfee0"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33aef3914a5b49db12ed3f24d214ffa50caefc8f4b0c7c7b9485bd4b231a898"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4acbe2349a3baac9cc212005b6cb4bbb7e5b34538886cde4f55dfc29173da1d6"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9b75b5d3416b00d064a5e6f4814fdfb18a964a7cf38dc00b5c2c02fa30a7dd0b"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:542a6f1d0f400b9ce1facb3e30dd3dc84e4affc60353509b00a7bdcd064be91e"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60ba9d104f4e8496107b1cb86e45a68a16d13511dc3986e0780e9f85c2136f9"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6065a489b7b284efb29d57adffae2b9b5e9403d3c8d95cfa04e04e024e6b4e77"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6bcca4d0d24d8c37bfe0cafdaaf4346b6c516db21ccaad5c7fba0a0df818dfc9"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:8155e21203161e5c78791fc049b99f0bbbf14d1d1839c8c93c8344957f9e8e1e"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:6a1eda14db1ac7a2ab4536dfe69e4d37fdd765e8e784ae4451e61582ebb76012"}, + {file = "rpds_py-0.25.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:de34a7d1893be76cb015929690dce3bde29f4de08143da2e9ad1cedb11dbf80e"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0d63a86b457069d669c423f093db4900aa102f0e5a626973eff4db8355c0fd96"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:89bb2b20829270aca28b1e5481be8ee24cb9aa86e6c0c81cb4ada2112c9588c5"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e103b48e63fd2b8a8e2b21ab5b5299a7146045626c2ed4011511ea8122d217"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fccd24c080850715c58a80200d367bc62b4bff6c9fb84e9564da1ebcafea6418"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12b42790c91e0041a98f0ec04244fb334696938793e785a5d4c7e56ca534d7da"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc907ea12216cfc5560148fc42459d86740fc739981c6feb94230dab09362679"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e11065b759c38c4945f8c9765ed2910e31fa5b2f7733401eb7d966f468367a2"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8abc1a3e29b599bf8bb5ad455256a757e8b0ed5621e7e48abe8209932dc6d11e"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:cd36b71f9f3bf195b2dd9be5eafbfc9409e6c8007aebc38a4dc051f522008033"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:805a0dff0674baa3f360c21dcbc622ae544f2bb4753d87a4a56a1881252a477e"}, + {file = "rpds_py-0.25.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:96742796f499ac23b59856db734e65b286d1214a0d9b57bcd7bece92d9201fa4"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7715597186a7277be12729c896019226321bad1f047da381ab707b177aa5017c"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b049dd0792d51f07193cd934acec89abe84d2607109e6ca223b2f0ff24f0c7d"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87c6ff87b38f46d712418d78b34db1198408a3d9a42eddc640644aea561216b1"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240251fd95b057c26f8538d0e673bf983eba4f38da95fbaf502bfc1a768b3984"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85587479f210350e9d9d25e505f422dd636e561658382ee8947357a4bac491ad"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:551897221bbc9de17bce4574810347db8ec1ba4ec2f50f35421790d34bdb6ef9"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3d50ac3b772c10e0b918a5ce2e871138896bfb5f35050ff1ff87ddca45961fc"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8029c19c8a32ef3093c417dd16a5f806e7f529fcceea7c627b2635e9da5104da"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:fe7439d9c5b402af2c9911c7facda1808d0c8dbfa9cf085e6aeac511a23f7d87"}, + {file 
= "rpds_py-0.25.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:77910d6bec321c9fccfe9cf5e407fed9d2c48a5e510473b4f070d5cf2413c003"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:0ee0cc81f875e853ccdf3badb44b67f771fb9149baa9e752777ccdcaf052ad26"}, + {file = "rpds_py-0.25.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:469054e6b2f8e41f1fe62b95f617082019d343eddeec3219ff3909067e672fb9"}, + {file = "rpds_py-0.25.0.tar.gz", hash = "sha256:4d97661bf5848dd9e5eb7ded480deccf9d32ce2cd500b88a26acbf7bd2864985"}, ] [[package]] @@ -2179,13 +2179,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20250328" +version = "2.32.0.20250515" description = "Typing stubs for requests" optional = false python-versions = ">=3.9" files = [ - {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"}, - {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"}, + {file = "types_requests-2.32.0.20250515-py3-none-any.whl", hash = "sha256:f8eba93b3a892beee32643ff836993f15a785816acca21ea0ffa006f05ef0fb2"}, + {file = "types_requests-2.32.0.20250515.tar.gz", hash = "sha256:09c8b63c11318cb2460813871aaa48b671002e59fda67ca909e9883777787581"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index 7e9ead35..60bcf14e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -112,3 +112,6 @@ section-order = ["future", "standard-library", "third-party", "first-party"] [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" + +[tool.poetry.scripts] +humanloop = "humanloop.cli.__main__:cli" diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py deleted file mode 100644 index 407e3fb6..00000000 --- a/src/humanloop/__init__.py +++ /dev/null @@ -1,872 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
-
-# isort: skip_file
-
-from .types import (
-    AgentCallResponse,
-    AgentCallResponseToolChoice,
-    AgentCallStreamResponse,
-    AgentCallStreamResponsePayload,
-    AgentConfigResponse,
-    AgentContinueCallResponse,
-    AgentContinueCallResponseToolChoice,
-    AgentContinueCallStreamResponse,
-    AgentContinueCallStreamResponsePayload,
-    AgentInlineTool,
-    AgentKernelRequest,
-    AgentKernelRequestReasoningEffort,
-    AgentKernelRequestStop,
-    AgentKernelRequestTemplate,
-    AgentKernelRequestToolsItem,
-    AgentLinkedFileRequest,
-    AgentLinkedFileResponse,
-    AgentLinkedFileResponseFile,
-    AgentLogResponse,
-    AgentLogResponseToolChoice,
-    AgentLogStreamResponse,
-    AgentResponse,
-    AgentResponseReasoningEffort,
-    AgentResponseStop,
-    AgentResponseTemplate,
-    AgentResponseToolsItem,
-    AnthropicRedactedThinkingContent,
-    AnthropicThinkingContent,
-    BaseModelsUserResponse,
-    BooleanEvaluatorStatsResponse,
-    ChatMessage,
-    ChatMessageContent,
-    ChatMessageContentItem,
-    ChatMessageThinkingItem,
-    ChatRole,
-    ChatToolType,
-    CodeEvaluatorRequest,
-    ConfigToolResponse,
-    CreateAgentLogResponse,
-    CreateDatapointRequest,
-    CreateDatapointRequestTargetValue,
-    CreateEvaluatorLogResponse,
-    CreateFlowLogResponse,
-    CreatePromptLogResponse,
-    CreateToolLogResponse,
-    DashboardConfiguration,
-    DatapointResponse,
-    DatapointResponseTargetValue,
-    DatasetResponse,
-    DatasetsRequest,
-    DirectoryResponse,
-    DirectoryWithParentsAndChildrenResponse,
-    DirectoryWithParentsAndChildrenResponseFilesItem,
-    EnvironmentResponse,
-    EnvironmentTag,
-    EvaluateeRequest,
-    EvaluateeResponse,
-    EvaluationEvaluatorResponse,
-    EvaluationLogResponse,
-    EvaluationResponse,
-    EvaluationRunResponse,
-    EvaluationRunsResponse,
-    EvaluationStats,
-    EvaluationStatus,
-    EvaluationsDatasetRequest,
-    EvaluationsRequest,
-    EvaluatorActivationDeactivationRequest,
-    EvaluatorActivationDeactivationRequestActivateItem,
-    EvaluatorActivationDeactivationRequestDeactivateItem,
-    EvaluatorAggregate,
-    EvaluatorArgumentsType,
-    EvaluatorConfigResponse,
-    EvaluatorFileId,
-    EvaluatorFilePath,
-    EvaluatorJudgmentNumberLimit,
-    EvaluatorJudgmentOptionResponse,
-    EvaluatorLogResponse,
-    EvaluatorLogResponseJudgment,
-    EvaluatorResponse,
-    EvaluatorResponseSpec,
-    EvaluatorReturnTypeEnum,
-    EvaluatorVersionId,
-    EvaluatorsRequest,
-    EventType,
-    ExternalEvaluatorRequest,
-    FeedbackType,
-    FileEnvironmentResponse,
-    FileEnvironmentResponseFile,
-    FileEnvironmentVariableRequest,
-    FileId,
-    FilePath,
-    FileRequest,
-    FileSortBy,
-    FileType,
-    FilesToolType,
-    FlowKernelRequest,
-    FlowLogResponse,
-    FlowResponse,
-    FunctionTool,
-    FunctionToolChoice,
-    HttpValidationError,
-    HumanEvaluatorRequest,
-    HumanEvaluatorRequestReturnType,
-    ImageChatContent,
-    ImageUrl,
-    ImageUrlDetail,
-    InputResponse,
-    LinkedFileRequest,
-    LinkedToolResponse,
-    ListAgents,
-    ListDatasets,
-    ListEvaluators,
-    ListFlows,
-    ListPrompts,
-    ListTools,
-    LlmEvaluatorRequest,
-    LogResponse,
-    LogStatus,
-    LogStreamResponse,
-    ModelEndpoints,
-    ModelProviders,
-    MonitoringEvaluatorEnvironmentRequest,
-    MonitoringEvaluatorResponse,
-    MonitoringEvaluatorState,
-    MonitoringEvaluatorVersionRequest,
-    NumericEvaluatorStatsResponse,
-    ObservabilityStatus,
-    OnAgentCallEnum,
-    OpenAiReasoningEffort,
-    OverallStats,
-    PaginatedDataAgentResponse,
-    PaginatedDataEvaluationLogResponse,
-    PaginatedDataEvaluatorResponse,
-    PaginatedDataFlowResponse,
-    PaginatedDataLogResponse,
-    PaginatedDataPromptResponse,
-    PaginatedDataToolResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
-    PaginatedDatapointResponse,
-    PaginatedDatasetResponse,
-    PaginatedEvaluationResponse,
-    PaginatedPromptLogResponse,
-    PaginatedSessionResponse,
-    PlatformAccessEnum,
-    PopulateTemplateResponse,
-    PopulateTemplateResponsePopulatedTemplate,
-    PopulateTemplateResponseReasoningEffort,
-    PopulateTemplateResponseStop,
-    PopulateTemplateResponseTemplate,
-    PromptCallLogResponse,
-    PromptCallResponse,
-    PromptCallResponseToolChoice,
-    PromptCallStreamResponse,
-    PromptKernelRequest,
-    PromptKernelRequestReasoningEffort,
-    PromptKernelRequestStop,
-    PromptKernelRequestTemplate,
-    PromptLogResponse,
-    PromptLogResponseToolChoice,
-    PromptResponse,
-    PromptResponseReasoningEffort,
-    PromptResponseStop,
-    PromptResponseTemplate,
-    ProviderApiKeys,
-    ResponseFormat,
-    ResponseFormatType,
-    RunStatsResponse,
-    RunStatsResponseEvaluatorStatsItem,
-    RunVersionResponse,
-    SelectEvaluatorStatsResponse,
-    SortOrder,
-    TemplateLanguage,
-    TextChatContent,
-    TextEvaluatorStatsResponse,
-    TimeUnit,
-    ToolCall,
-    ToolCallResponse,
-    ToolChoice,
-    ToolFunction,
-    ToolKernelRequest,
-    ToolLogResponse,
-    ToolResponse,
-    UpdateDatesetAction,
-    UpdateEvaluationStatusRequest,
-    UpdateVersionRequest,
-    UserResponse,
-    Valence,
-    ValidationError,
-    ValidationErrorLocItem,
-    VersionDeploymentResponse,
-    VersionDeploymentResponseFile,
-    VersionId,
-    VersionIdResponse,
-    VersionIdResponseVersion,
-    VersionReferenceResponse,
-    VersionStatsResponse,
-    VersionStatsResponseEvaluatorVersionStatsItem,
-    VersionStatus,
-)
-from .errors import UnprocessableEntityError
-from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
-from .agents import (
-    AgentLogRequestAgent,
-    AgentLogRequestAgentParams,
-    AgentLogRequestToolChoice,
-    AgentLogRequestToolChoiceParams,
-    AgentRequestReasoningEffort,
-    AgentRequestReasoningEffortParams,
-    AgentRequestStop,
-    AgentRequestStopParams,
-    AgentRequestTemplate,
-    AgentRequestTemplateParams,
-    AgentRequestToolsItem,
-    AgentRequestToolsItemParams,
-    AgentsCallRequestAgent,
-    AgentsCallRequestAgentParams,
-    AgentsCallRequestToolChoice,
-    AgentsCallRequestToolChoiceParams,
-    AgentsCallStreamRequestAgent,
-    AgentsCallStreamRequestAgentParams,
-    AgentsCallStreamRequestToolChoice,
-    AgentsCallStreamRequestToolChoiceParams,
-)
-from .client import AsyncHumanloop, Humanloop
-from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
-from .environment import HumanloopEnvironment
-from .evaluations import (
-    AddEvaluatorsRequestEvaluatorsItem,
-    AddEvaluatorsRequestEvaluatorsItemParams,
-    CreateEvaluationRequestEvaluatorsItem,
-    CreateEvaluationRequestEvaluatorsItemParams,
-    CreateRunRequestDataset,
-    CreateRunRequestDatasetParams,
-    CreateRunRequestVersion,
-    CreateRunRequestVersionParams,
-)
-from .evaluators import (
-    CreateEvaluatorLogRequestJudgment,
-    CreateEvaluatorLogRequestJudgmentParams,
-    CreateEvaluatorLogRequestSpec,
-    CreateEvaluatorLogRequestSpecParams,
-    EvaluatorRequestSpec,
-    EvaluatorRequestSpecParams,
-)
-from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
-from .prompts import (
-    PromptLogRequestPrompt,
-    PromptLogRequestPromptParams,
-    PromptLogRequestToolChoice,
-    PromptLogRequestToolChoiceParams,
-    PromptLogUpdateRequestToolChoice,
-    PromptLogUpdateRequestToolChoiceParams,
-    PromptRequestReasoningEffort,
-    PromptRequestReasoningEffortParams,
-    PromptRequestStop,
-    PromptRequestStopParams,
-    PromptRequestTemplate,
-    PromptRequestTemplateParams,
-    PromptsCallRequestPrompt,
-    PromptsCallRequestPromptParams,
-    PromptsCallRequestToolChoice,
-    PromptsCallRequestToolChoiceParams,
-    PromptsCallStreamRequestPrompt,
-    PromptsCallStreamRequestPromptParams,
-    PromptsCallStreamRequestToolChoice,
-    PromptsCallStreamRequestToolChoiceParams,
-)
-from .requests import (
-    AgentCallResponseParams,
-    AgentCallResponseToolChoiceParams,
-    AgentCallStreamResponseParams,
-    AgentCallStreamResponsePayloadParams,
-    AgentConfigResponseParams,
-    AgentContinueCallResponseParams,
-    AgentContinueCallResponseToolChoiceParams,
-    AgentContinueCallStreamResponseParams,
-    AgentContinueCallStreamResponsePayloadParams,
-    AgentInlineToolParams,
-    AgentKernelRequestParams,
-    AgentKernelRequestReasoningEffortParams,
-    AgentKernelRequestStopParams,
-    AgentKernelRequestTemplateParams,
-    AgentKernelRequestToolsItemParams,
-    AgentLinkedFileRequestParams,
-    AgentLinkedFileResponseFileParams,
-    AgentLinkedFileResponseParams,
-    AgentLogResponseParams,
-    AgentLogResponseToolChoiceParams,
-    AgentLogStreamResponseParams,
-    AgentResponseParams,
-    AgentResponseReasoningEffortParams,
-    AgentResponseStopParams,
-    AgentResponseTemplateParams,
-    AgentResponseToolsItemParams,
-    AnthropicRedactedThinkingContentParams,
-    AnthropicThinkingContentParams,
-    BooleanEvaluatorStatsResponseParams,
-    ChatMessageContentItemParams,
-    ChatMessageContentParams,
-    ChatMessageParams,
-    ChatMessageThinkingItemParams,
-    CodeEvaluatorRequestParams,
-    CreateAgentLogResponseParams,
-    CreateDatapointRequestParams,
-    CreateDatapointRequestTargetValueParams,
-    CreateEvaluatorLogResponseParams,
-    CreateFlowLogResponseParams,
-    CreatePromptLogResponseParams,
-    CreateToolLogResponseParams,
-    DashboardConfigurationParams,
-    DatapointResponseParams,
-    DatapointResponseTargetValueParams,
-    DatasetResponseParams,
-    DirectoryResponseParams,
-    DirectoryWithParentsAndChildrenResponseFilesItemParams,
-    DirectoryWithParentsAndChildrenResponseParams,
-    EnvironmentResponseParams,
-    EvaluateeRequestParams,
-    EvaluateeResponseParams,
-    EvaluationEvaluatorResponseParams,
-    EvaluationLogResponseParams,
-    EvaluationResponseParams,
-    EvaluationRunResponseParams,
-    EvaluationRunsResponseParams,
-    EvaluationStatsParams,
-    EvaluatorActivationDeactivationRequestActivateItemParams,
-    EvaluatorActivationDeactivationRequestDeactivateItemParams,
-    EvaluatorActivationDeactivationRequestParams,
-    EvaluatorAggregateParams,
-    EvaluatorConfigResponseParams,
-    EvaluatorFileIdParams,
-    EvaluatorFilePathParams,
-    EvaluatorJudgmentNumberLimitParams,
-    EvaluatorJudgmentOptionResponseParams,
-    EvaluatorLogResponseJudgmentParams,
-    EvaluatorLogResponseParams,
-    EvaluatorResponseParams,
-    EvaluatorResponseSpecParams,
-    EvaluatorVersionIdParams,
-    ExternalEvaluatorRequestParams,
-    FileEnvironmentResponseFileParams,
-    FileEnvironmentResponseParams,
-    FileEnvironmentVariableRequestParams,
-    FileIdParams,
-    FilePathParams,
-    FileRequestParams,
-    FlowKernelRequestParams,
-    FlowLogResponseParams,
-    FlowResponseParams,
-    FunctionToolChoiceParams,
-    FunctionToolParams,
-    HttpValidationErrorParams,
-    HumanEvaluatorRequestParams,
-    ImageChatContentParams,
-    ImageUrlParams,
-    InputResponseParams,
-    LinkedFileRequestParams,
-    LinkedToolResponseParams,
-    ListAgentsParams,
-    ListDatasetsParams,
-    ListEvaluatorsParams,
-    ListFlowsParams,
-    ListPromptsParams,
-    ListToolsParams,
-    LlmEvaluatorRequestParams,
-    LogResponseParams,
-    LogStreamResponseParams,
-    MonitoringEvaluatorEnvironmentRequestParams,
-    MonitoringEvaluatorResponseParams,
-    MonitoringEvaluatorVersionRequestParams,
-    NumericEvaluatorStatsResponseParams,
-    OverallStatsParams,
-    PaginatedDataAgentResponseParams,
-    PaginatedDataEvaluationLogResponseParams,
-    PaginatedDataEvaluatorResponseParams,
-    PaginatedDataFlowResponseParams,
-    PaginatedDataLogResponseParams,
-    PaginatedDataPromptResponseParams,
-    PaginatedDataToolResponseParams,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
-    PaginatedDatapointResponseParams,
-    PaginatedDatasetResponseParams,
-    PaginatedEvaluationResponseParams,
-    PopulateTemplateResponseParams,
-    PopulateTemplateResponsePopulatedTemplateParams,
-    PopulateTemplateResponseReasoningEffortParams,
-    PopulateTemplateResponseStopParams,
-    PopulateTemplateResponseTemplateParams,
-    PromptCallLogResponseParams,
-    PromptCallResponseParams,
-    PromptCallResponseToolChoiceParams,
-    PromptCallStreamResponseParams,
-    PromptKernelRequestParams,
-    PromptKernelRequestReasoningEffortParams,
-    PromptKernelRequestStopParams,
-    PromptKernelRequestTemplateParams,
-    PromptLogResponseParams,
-    PromptLogResponseToolChoiceParams,
-    PromptResponseParams,
-    PromptResponseReasoningEffortParams,
-    PromptResponseStopParams,
-    PromptResponseTemplateParams,
-    ProviderApiKeysParams,
-    ResponseFormatParams,
-    RunStatsResponseEvaluatorStatsItemParams,
-    RunStatsResponseParams,
-    RunVersionResponseParams,
-    SelectEvaluatorStatsResponseParams,
-    TextChatContentParams,
-    TextEvaluatorStatsResponseParams,
-    ToolCallParams,
-    ToolCallResponseParams,
-    ToolChoiceParams,
-    ToolFunctionParams,
-    ToolKernelRequestParams,
-    ToolLogResponseParams,
-    ToolResponseParams,
-    UpdateVersionRequestParams,
-    ValidationErrorLocItemParams,
-    ValidationErrorParams,
-    VersionDeploymentResponseFileParams,
-    VersionDeploymentResponseParams,
-    VersionIdParams,
-    VersionIdResponseParams,
-    VersionIdResponseVersionParams,
-    VersionReferenceResponseParams,
-    VersionStatsResponseEvaluatorVersionStatsItemParams,
-    VersionStatsResponseParams,
-)
-from .version import __version__
-
-__all__ = [
-    "AddEvaluatorsRequestEvaluatorsItem",
-    "AddEvaluatorsRequestEvaluatorsItemParams",
-    "AgentCallResponse",
-    "AgentCallResponseParams",
-    "AgentCallResponseToolChoice",
-    "AgentCallResponseToolChoiceParams",
-    "AgentCallStreamResponse",
-    "AgentCallStreamResponseParams",
-    "AgentCallStreamResponsePayload",
-    "AgentCallStreamResponsePayloadParams",
-    "AgentConfigResponse",
-    "AgentConfigResponseParams",
-    "AgentContinueCallResponse",
-    "AgentContinueCallResponseParams",
-    "AgentContinueCallResponseToolChoice",
-    "AgentContinueCallResponseToolChoiceParams",
-    "AgentContinueCallStreamResponse",
-    "AgentContinueCallStreamResponseParams",
-    "AgentContinueCallStreamResponsePayload",
-    "AgentContinueCallStreamResponsePayloadParams",
-    "AgentInlineTool",
-    "AgentInlineToolParams",
-    "AgentKernelRequest",
-    "AgentKernelRequestParams",
-    "AgentKernelRequestReasoningEffort",
-    "AgentKernelRequestReasoningEffortParams",
-    "AgentKernelRequestStop",
-    "AgentKernelRequestStopParams",
-    "AgentKernelRequestTemplate",
-    "AgentKernelRequestTemplateParams",
-    "AgentKernelRequestToolsItem",
-    "AgentKernelRequestToolsItemParams",
-    "AgentLinkedFileRequest",
-    "AgentLinkedFileRequestParams",
-    "AgentLinkedFileResponse",
-    "AgentLinkedFileResponseFile",
-    "AgentLinkedFileResponseFileParams",
-    "AgentLinkedFileResponseParams",
-    "AgentLogRequestAgent",
-    "AgentLogRequestAgentParams",
-    "AgentLogRequestToolChoice",
-    "AgentLogRequestToolChoiceParams",
-    "AgentLogResponse",
-    "AgentLogResponseParams",
-    "AgentLogResponseToolChoice",
-    "AgentLogResponseToolChoiceParams",
-    "AgentLogStreamResponse",
-    "AgentLogStreamResponseParams",
-    "AgentRequestReasoningEffort",
-    "AgentRequestReasoningEffortParams",
-    "AgentRequestStop",
-    "AgentRequestStopParams",
-    "AgentRequestTemplate",
-    "AgentRequestTemplateParams",
-    "AgentRequestToolsItem",
-    "AgentRequestToolsItemParams",
-    "AgentResponse",
-    "AgentResponseParams",
-    "AgentResponseReasoningEffort",
-    "AgentResponseReasoningEffortParams",
-    "AgentResponseStop",
-    "AgentResponseStopParams",
-    "AgentResponseTemplate",
-    "AgentResponseTemplateParams",
-    "AgentResponseToolsItem",
-    "AgentResponseToolsItemParams",
-    "AgentsCallRequestAgent",
-    "AgentsCallRequestAgentParams",
-    "AgentsCallRequestToolChoice",
-    "AgentsCallRequestToolChoiceParams",
-    "AgentsCallStreamRequestAgent",
-    "AgentsCallStreamRequestAgentParams",
-    "AgentsCallStreamRequestToolChoice",
-    "AgentsCallStreamRequestToolChoiceParams",
-    "AnthropicRedactedThinkingContent",
-    "AnthropicRedactedThinkingContentParams",
-    "AnthropicThinkingContent",
-    "AnthropicThinkingContentParams",
-    "AsyncHumanloop",
-    "BaseModelsUserResponse",
-    "BooleanEvaluatorStatsResponse",
-    "BooleanEvaluatorStatsResponseParams",
-    "ChatMessage",
-    "ChatMessageContent",
-    "ChatMessageContentItem",
-    "ChatMessageContentItemParams",
-    "ChatMessageContentParams",
-    "ChatMessageParams",
- "ChatMessageThinkingItem", - "ChatMessageThinkingItemParams", - "ChatRole", - "ChatToolType", - "CodeEvaluatorRequest", - "CodeEvaluatorRequestParams", - "ConfigToolResponse", - "CreateAgentLogResponse", - "CreateAgentLogResponseParams", - "CreateDatapointRequest", - "CreateDatapointRequestParams", - "CreateDatapointRequestTargetValue", - "CreateDatapointRequestTargetValueParams", - "CreateEvaluationRequestEvaluatorsItem", - "CreateEvaluationRequestEvaluatorsItemParams", - "CreateEvaluatorLogRequestJudgment", - "CreateEvaluatorLogRequestJudgmentParams", - "CreateEvaluatorLogRequestSpec", - "CreateEvaluatorLogRequestSpecParams", - "CreateEvaluatorLogResponse", - "CreateEvaluatorLogResponseParams", - "CreateFlowLogResponse", - "CreateFlowLogResponseParams", - "CreatePromptLogResponse", - "CreatePromptLogResponseParams", - "CreateRunRequestDataset", - "CreateRunRequestDatasetParams", - "CreateRunRequestVersion", - "CreateRunRequestVersionParams", - "CreateToolLogResponse", - "CreateToolLogResponseParams", - "DashboardConfiguration", - "DashboardConfigurationParams", - "DatapointResponse", - "DatapointResponseParams", - "DatapointResponseTargetValue", - "DatapointResponseTargetValueParams", - "DatasetResponse", - "DatasetResponseParams", - "DatasetsRequest", - "DirectoryResponse", - "DirectoryResponseParams", - "DirectoryWithParentsAndChildrenResponse", - "DirectoryWithParentsAndChildrenResponseFilesItem", - "DirectoryWithParentsAndChildrenResponseFilesItemParams", - "DirectoryWithParentsAndChildrenResponseParams", - "EnvironmentResponse", - "EnvironmentResponseParams", - "EnvironmentTag", - "EvaluateeRequest", - "EvaluateeRequestParams", - "EvaluateeResponse", - "EvaluateeResponseParams", - "EvaluationEvaluatorResponse", - "EvaluationEvaluatorResponseParams", - "EvaluationLogResponse", - "EvaluationLogResponseParams", - "EvaluationResponse", - "EvaluationResponseParams", - "EvaluationRunResponse", - "EvaluationRunResponseParams", - "EvaluationRunsResponse", - "EvaluationRunsResponseParams", - "EvaluationStats", - "EvaluationStatsParams", - "EvaluationStatus", - "EvaluationsDatasetRequest", - "EvaluationsRequest", - "EvaluatorActivationDeactivationRequest", - "EvaluatorActivationDeactivationRequestActivateItem", - "EvaluatorActivationDeactivationRequestActivateItemParams", - "EvaluatorActivationDeactivationRequestDeactivateItem", - "EvaluatorActivationDeactivationRequestDeactivateItemParams", - "EvaluatorActivationDeactivationRequestParams", - "EvaluatorAggregate", - "EvaluatorAggregateParams", - "EvaluatorArgumentsType", - "EvaluatorConfigResponse", - "EvaluatorConfigResponseParams", - "EvaluatorFileId", - "EvaluatorFileIdParams", - "EvaluatorFilePath", - "EvaluatorFilePathParams", - "EvaluatorJudgmentNumberLimit", - "EvaluatorJudgmentNumberLimitParams", - "EvaluatorJudgmentOptionResponse", - "EvaluatorJudgmentOptionResponseParams", - "EvaluatorLogResponse", - "EvaluatorLogResponseJudgment", - "EvaluatorLogResponseJudgmentParams", - "EvaluatorLogResponseParams", - "EvaluatorRequestSpec", - "EvaluatorRequestSpecParams", - "EvaluatorResponse", - "EvaluatorResponseParams", - "EvaluatorResponseSpec", - "EvaluatorResponseSpecParams", - "EvaluatorReturnTypeEnum", - "EvaluatorVersionId", - "EvaluatorVersionIdParams", - "EvaluatorsRequest", - "EventType", - "ExternalEvaluatorRequest", - "ExternalEvaluatorRequestParams", - "FeedbackType", - "FileEnvironmentResponse", - "FileEnvironmentResponseFile", - "FileEnvironmentResponseFileParams", - "FileEnvironmentResponseParams", - 
"FileEnvironmentVariableRequest", - "FileEnvironmentVariableRequestParams", - "FileId", - "FileIdParams", - "FilePath", - "FilePathParams", - "FileRequest", - "FileRequestParams", - "FileSortBy", - "FileType", - "FilesToolType", - "FlowKernelRequest", - "FlowKernelRequestParams", - "FlowLogResponse", - "FlowLogResponseParams", - "FlowResponse", - "FlowResponseParams", - "FunctionTool", - "FunctionToolChoice", - "FunctionToolChoiceParams", - "FunctionToolParams", - "HttpValidationError", - "HttpValidationErrorParams", - "HumanEvaluatorRequest", - "HumanEvaluatorRequestParams", - "HumanEvaluatorRequestReturnType", - "Humanloop", - "HumanloopEnvironment", - "ImageChatContent", - "ImageChatContentParams", - "ImageUrl", - "ImageUrlDetail", - "ImageUrlParams", - "InputResponse", - "InputResponseParams", - "LinkedFileRequest", - "LinkedFileRequestParams", - "LinkedToolResponse", - "LinkedToolResponseParams", - "ListAgents", - "ListAgentsParams", - "ListDatasets", - "ListDatasetsParams", - "ListEvaluators", - "ListEvaluatorsParams", - "ListFlows", - "ListFlowsParams", - "ListPrompts", - "ListPromptsParams", - "ListTools", - "ListToolsParams", - "ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints", - "LlmEvaluatorRequest", - "LlmEvaluatorRequestParams", - "LogResponse", - "LogResponseParams", - "LogStatus", - "LogStreamResponse", - "LogStreamResponseParams", - "ModelEndpoints", - "ModelProviders", - "MonitoringEvaluatorEnvironmentRequest", - "MonitoringEvaluatorEnvironmentRequestParams", - "MonitoringEvaluatorResponse", - "MonitoringEvaluatorResponseParams", - "MonitoringEvaluatorState", - "MonitoringEvaluatorVersionRequest", - "MonitoringEvaluatorVersionRequestParams", - "NumericEvaluatorStatsResponse", - "NumericEvaluatorStatsResponseParams", - "ObservabilityStatus", - "OnAgentCallEnum", - "OpenAiReasoningEffort", - "OverallStats", - "OverallStatsParams", - "PaginatedDataAgentResponse", - "PaginatedDataAgentResponseParams", - "PaginatedDataEvaluationLogResponse", - "PaginatedDataEvaluationLogResponseParams", - "PaginatedDataEvaluatorResponse", - "PaginatedDataEvaluatorResponseParams", - "PaginatedDataFlowResponse", - "PaginatedDataFlowResponseParams", - "PaginatedDataLogResponse", - "PaginatedDataLogResponseParams", - "PaginatedDataPromptResponse", - "PaginatedDataPromptResponseParams", - "PaginatedDataToolResponse", - "PaginatedDataToolResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", - "PaginatedDatapointResponse", - "PaginatedDatapointResponseParams", - "PaginatedDatasetResponse", - "PaginatedDatasetResponseParams", - "PaginatedEvaluationResponse", - "PaginatedEvaluationResponseParams", - "PaginatedPromptLogResponse", - "PaginatedSessionResponse", - "PlatformAccessEnum", - "PopulateTemplateResponse", - "PopulateTemplateResponseParams", - "PopulateTemplateResponsePopulatedTemplate", - "PopulateTemplateResponsePopulatedTemplateParams", - "PopulateTemplateResponseReasoningEffort", - "PopulateTemplateResponseReasoningEffortParams", - "PopulateTemplateResponseStop", - "PopulateTemplateResponseStopParams", - "PopulateTemplateResponseTemplate", - 
"PopulateTemplateResponseTemplateParams", - "PromptCallLogResponse", - "PromptCallLogResponseParams", - "PromptCallResponse", - "PromptCallResponseParams", - "PromptCallResponseToolChoice", - "PromptCallResponseToolChoiceParams", - "PromptCallStreamResponse", - "PromptCallStreamResponseParams", - "PromptKernelRequest", - "PromptKernelRequestParams", - "PromptKernelRequestReasoningEffort", - "PromptKernelRequestReasoningEffortParams", - "PromptKernelRequestStop", - "PromptKernelRequestStopParams", - "PromptKernelRequestTemplate", - "PromptKernelRequestTemplateParams", - "PromptLogRequestPrompt", - "PromptLogRequestPromptParams", - "PromptLogRequestToolChoice", - "PromptLogRequestToolChoiceParams", - "PromptLogResponse", - "PromptLogResponseParams", - "PromptLogResponseToolChoice", - "PromptLogResponseToolChoiceParams", - "PromptLogUpdateRequestToolChoice", - "PromptLogUpdateRequestToolChoiceParams", - "PromptRequestReasoningEffort", - "PromptRequestReasoningEffortParams", - "PromptRequestStop", - "PromptRequestStopParams", - "PromptRequestTemplate", - "PromptRequestTemplateParams", - "PromptResponse", - "PromptResponseParams", - "PromptResponseReasoningEffort", - "PromptResponseReasoningEffortParams", - "PromptResponseStop", - "PromptResponseStopParams", - "PromptResponseTemplate", - "PromptResponseTemplateParams", - "PromptsCallRequestPrompt", - "PromptsCallRequestPromptParams", - "PromptsCallRequestToolChoice", - "PromptsCallRequestToolChoiceParams", - "PromptsCallStreamRequestPrompt", - "PromptsCallStreamRequestPromptParams", - "PromptsCallStreamRequestToolChoice", - "PromptsCallStreamRequestToolChoiceParams", - "ProviderApiKeys", - "ProviderApiKeysParams", - "ResponseFormat", - "ResponseFormatParams", - "ResponseFormatType", - "RetrieveByPathFilesRetrieveByPathPostResponse", - "RetrieveByPathFilesRetrieveByPathPostResponseParams", - "RunStatsResponse", - "RunStatsResponseEvaluatorStatsItem", - "RunStatsResponseEvaluatorStatsItemParams", - "RunStatsResponseParams", - "RunVersionResponse", - "RunVersionResponseParams", - "SelectEvaluatorStatsResponse", - "SelectEvaluatorStatsResponseParams", - "SortOrder", - "TemplateLanguage", - "TextChatContent", - "TextChatContentParams", - "TextEvaluatorStatsResponse", - "TextEvaluatorStatsResponseParams", - "TimeUnit", - "ToolCall", - "ToolCallParams", - "ToolCallResponse", - "ToolCallResponseParams", - "ToolChoice", - "ToolChoiceParams", - "ToolFunction", - "ToolFunctionParams", - "ToolKernelRequest", - "ToolKernelRequestParams", - "ToolLogResponse", - "ToolLogResponseParams", - "ToolResponse", - "ToolResponseParams", - "UnprocessableEntityError", - "UpdateDatesetAction", - "UpdateEvaluationStatusRequest", - "UpdateVersionRequest", - "UpdateVersionRequestParams", - "UserResponse", - "Valence", - "ValidationError", - "ValidationErrorLocItem", - "ValidationErrorLocItemParams", - "ValidationErrorParams", - "VersionDeploymentResponse", - "VersionDeploymentResponseFile", - "VersionDeploymentResponseFileParams", - "VersionDeploymentResponseParams", - "VersionId", - "VersionIdParams", - "VersionIdResponse", - "VersionIdResponseParams", - "VersionIdResponseVersion", - "VersionIdResponseVersionParams", - "VersionReferenceResponse", - "VersionReferenceResponseParams", - "VersionStatsResponse", - "VersionStatsResponseEvaluatorVersionStatsItem", - "VersionStatsResponseEvaluatorVersionStatsItemParams", - "VersionStatsResponseParams", - "VersionStatus", - "__version__", - "agents", - "datasets", - "directories", - "evaluations", - "evaluators", - "files", - 
"flows", - "logs", - "prompts", - "tools", -] diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py deleted file mode 100644 index e8a63fd6..00000000 --- a/src/humanloop/agents/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .types import ( - AgentLogRequestAgent, - AgentLogRequestToolChoice, - AgentRequestReasoningEffort, - AgentRequestStop, - AgentRequestTemplate, - AgentRequestToolsItem, - AgentsCallRequestAgent, - AgentsCallRequestToolChoice, - AgentsCallStreamRequestAgent, - AgentsCallStreamRequestToolChoice, -) -from .requests import ( - AgentLogRequestAgentParams, - AgentLogRequestToolChoiceParams, - AgentRequestReasoningEffortParams, - AgentRequestStopParams, - AgentRequestTemplateParams, - AgentRequestToolsItemParams, - AgentsCallRequestAgentParams, - AgentsCallRequestToolChoiceParams, - AgentsCallStreamRequestAgentParams, - AgentsCallStreamRequestToolChoiceParams, -) - -__all__ = [ - "AgentLogRequestAgent", - "AgentLogRequestAgentParams", - "AgentLogRequestToolChoice", - "AgentLogRequestToolChoiceParams", - "AgentRequestReasoningEffort", - "AgentRequestReasoningEffortParams", - "AgentRequestStop", - "AgentRequestStopParams", - "AgentRequestTemplate", - "AgentRequestTemplateParams", - "AgentRequestToolsItem", - "AgentRequestToolsItemParams", - "AgentsCallRequestAgent", - "AgentsCallRequestAgentParams", - "AgentsCallRequestToolChoice", - "AgentsCallRequestToolChoiceParams", - "AgentsCallStreamRequestAgent", - "AgentsCallStreamRequestAgentParams", - "AgentsCallStreamRequestToolChoice", - "AgentsCallStreamRequestToolChoiceParams", -] diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py deleted file mode 100644 index ab7b887c..00000000 --- a/src/humanloop/agents/client.py +++ /dev/null @@ -1,2946 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import datetime as dt -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.provider_api_keys import ProviderApiKeysParams -from ..requests.response_format import ResponseFormatParams -from ..types.agent_call_response import AgentCallResponse -from ..types.agent_call_stream_response import AgentCallStreamResponse -from ..types.agent_continue_call_response import AgentContinueCallResponse -from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse -from ..types.agent_kernel_request import AgentKernelRequest -from ..types.agent_log_response import AgentLogResponse -from ..types.agent_response import AgentResponse -from ..types.create_agent_log_response import CreateAgentLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.list_agents import ListAgents -from ..types.log_status import LogStatus -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.sort_order import SortOrder -from ..types.template_language import TemplateLanguage -from .raw_client import AsyncRawAgentsClient, RawAgentsClient -from .requests.agent_log_request_agent import AgentLogRequestAgentParams -from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams -from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams -from .requests.agent_request_stop import AgentRequestStopParams -from .requests.agent_request_template import AgentRequestTemplateParams -from .requests.agent_request_tools_item import AgentRequestToolsItemParams -from .requests.agents_call_request_agent import AgentsCallRequestAgentParams -from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams -from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams -from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class AgentsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawAgentsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawAgentsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. 
- - Returns - ------- - RawAgentsClient - """ - return self._raw_client - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agent_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateAgentLogResponse: - """ - Create an Agent Log. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` - in order to trigger Evaluators. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished.
- - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentLogRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agent_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreateAgentLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object' - , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}} - , 'additionalProperties': False - , 'required': ['output'] - }, 'strict': True}, 'on_agent_call': "stop"}]}, ) - """ - _response = self._raw_client.log( - version_id=version_id, - environment=environment, - run_id=run_id, - path=path, - id=id, - output_message=output_message, - prompt_tokens=prompt_tokens, - reasoning_tokens=reasoning_tokens, - output_tokens=output_tokens, - prompt_cost=prompt_cost, - output_cost=output_cost, - finish_reason=finish_reason, - messages=messages, - tool_choice=tool_choice, - agent=agent, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - agent_log_request_environment=agent_log_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - def update_log( - self, - id: str, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentLogResponse: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - log_id : str - Unique identifier for the Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Agent. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Agent. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Agent Log. - - output : typing.Optional[str] - The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it.
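Taken together, `log` and `update_log` support the incomplete-then-complete flow described above. A minimal sketch, assuming the returned CreateAgentLogResponse exposes the new Log's ID as `id` (that attribute name is an assumption, not taken from this file):

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Create the Log as `incomplete` so monitoring Evaluators wait for the final state.
created = client.agents.log(
    path="Banking/Teller Agent",
    messages=[{"role": "user", "content": "I need to withdraw $1000"}],
    log_status="incomplete",
)

# ...run your own agent loop, then mark the Log `complete` to trigger Evaluators.
client.agents.update_log(
    id="ag_1234567890",  # the Agent's ID
    log_id=created.id,   # assumed attribute on CreateAgentLogResponse
    output_message={"role": "assistant", "content": "Your withdrawal is complete."},
    log_status="complete",
)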
Monitoring Evaluators will only run on `complete` Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course! Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", ) - """ - _response = self._raw_client.update_log( - id, - log_id, - messages=messages, - output_message=output_message, - inputs=inputs, - output=output, - error=error, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[AgentCallStreamResponse]: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier.
For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallStreamRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.Iterator[AgentCallStreamResponse] - - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.agents.call_stream() - for chunk in response: - print(chunk) - """ - with self._raw_client.call_stream( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - agent=agent, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - agents_call_stream_request_environment=agents_call_stream_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - return_inputs=return_inputs, - include_trace_children=include_trace_children, - request_options=request_options, - ) as r: - yield from r.data - - def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentCallResponse: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name.
This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
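The halt-and-continue behaviour described above pairs `call` with the `continue_call` method defined further down in this client. A sketch under stated assumptions: the attribute carrying the incomplete Log's ID (`log_id` here) is an assumption about the response shape, while the Tool-message format matches the `continue_call` example below.

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

response = client.agents.call(
    path="Banking/Teller Agent",
    messages=[{"role": "user", "content": "What is my checking account balance?"}],
)

# If execution halted on a tool Humanloop cannot run, execute it yourself and
# resume by sending a Tool message with the result for the pending tool call.
result = client.agents.continue_call(
    log_id=response.log_id,  # assumed attribute name on AgentCallResponse
    messages=[
        {
            "role": "tool",
            "content": '{"type": "checking", "balance": 5200}',
            "tool_call_id": "tc_1234567890",
        }
    ],
)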
- - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentCallResponse - - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], ) - """ - _response = self._raw_client.call( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - agent=agent, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - agents_call_request_environment=agents_call_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - return_inputs=return_inputs, - include_trace_children=include_trace_children, - request_options=request_options, - ) - return _response.data - - def continue_call_stream( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[AgentContinueCallStreamResponse]: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call, by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Yields - ------ - typing.Iterator[AgentContinueCallStreamResponse] - - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], ) - for chunk in response: - print(chunk) - """ - with self._raw_client.continue_call_stream( - log_id=log_id, - messages=messages, - provider_api_keys=provider_api_keys, - include_trace_children=include_trace_children, - request_options=request_options, - ) as r: - yield from r.data - - def continue_call( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentContinueCallResponse: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call, by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentContinueCallResponse - - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], ) - """ - _response = self._raw_client.continue_call( - log_id=log_id, - messages=messages, - provider_api_keys=provider_api_keys, - include_trace_children=include_trace_children, - request_options=request_options, - ) - return _response.data - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[AgentResponse]: - """ - Get a list of all Agents. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Agents to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Agent name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- - sort_by : typing.Optional[FileSortBy] - Field to sort Agents by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SyncPager[AgentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.agents.list(size=1, ) - for item in response: - print(item) - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - print(page) - """ - return self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[AgentRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[AgentRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_iterations: typing.Optional[int] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Create an Agent or update it with a new version if it already exists. - - Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model, etc.) and - tools determine the versions of the Agent. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Agent - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[AgentRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
- For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[AgentRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] - Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. - - max_iterations : typing.Optional[int] - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - - version_name : typing.Optional[str] - Unique name for the Agent version. Each Agent can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - description : typing.Optional[str] - Description of the Agent. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this Agent. - - readme : typing.Optional[str] - Long description of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
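Because version names must be unique within an Agent, re-running `upsert` with the same `version_name` but different parameters surfaces the 409 Conflict mentioned above. A hedged sketch; the `ApiError` import path is an assumption based on the `core/api_error.py` module in this package:

from humanloop import Humanloop
from humanloop.core.api_error import ApiError  # assumed import path

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.agents.upsert(
        path="Banking/Teller Agent",
        provider="anthropic",
        endpoint="chat",
        model="claude-3-7-sonnet-latest",
        version_name="teller-agent-v1",  # must be unique within this Agent
    )
except ApiError as e:
    if e.status_code == 409:
        # A different version named 'teller-agent-v1' already exists;
        # pick a new version_name or reuse the existing version.
        pass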
- - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.upsert(path='Banking/Teller Agent', provider="anthropic", endpoint="chat", model='claude-3-7-sonnet-latest', reasoning_effort=1024, template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], max_iterations=3, tools=[{'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object' - , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}} - , 'additionalProperties': False - , 'required': ['output'] - }, 'strict': True}, 'on_agent_call': "stop"}], version_name='teller-agent-v1', version_description='Initial version', ) - """ - _response = self._raw_client.upsert( - model=model, - path=path, - id=id, - endpoint=endpoint, - template=template, - template_language=template_language, - provider=provider, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - other=other, - seed=seed, - response_format=response_format, - reasoning_effort=reasoning_effort, - tools=tools, - attributes=attributes, - max_iterations=max_iterations, - version_name=version_name, - version_description=version_description, - description=description, - tags=tags, - readme=readme, - request_options=request_options, - ) - return _response.data - - def delete_agent_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', ) - """ - _response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options) - return _response.data - - def patch_agent_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Update the name or description of the Agent version. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', ) - """ - _response = self._raw_client.patch_agent_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Retrieve the Agent with the given ID. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.get(id='ag_1234567890', ) - """ - _response = self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Agent with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.delete(id='ag_1234567890', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Move the Agent to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - path : typing.Optional[str] - Path of the Agent including the Agent name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Agent. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move the Agent to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.move(id='ag_1234567890', path='new directory/new name', ) - """ - _response = self._raw_client.move( - id, path=path, name=name, directory_id=directory_id, request_options=request_options - ) - return _response.data - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListAgents: - """ - Get a list of all the versions of an Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListAgents - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.list_versions(id='ag_1234567890', ) - """ - _response = self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AgentResponse: - """ - Deploy Agent to an Environment. - - Set the deployed version for the specified Environment. This Agent - will be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) - """ - _response = self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Agent from the Environment. - - Remove the deployed version for the specified Environment. This Agent - will no longer be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.remove_deployment(id='id', environment_id='environment_id', ) - """ - _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Agent.
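A sketch tying the version and deployment endpoints together: pin a specific version to an Environment, then list what is deployed where. The `records` attribute on ListAgents and the `version_id` attribute on its items are assumptions about the response shape; the IDs are placeholders.

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Pick a version of the Agent and deploy it to an Environment.
versions = client.agents.list_versions(id="ag_1234567890")
latest = versions.records[0]  # assumed attribute on ListAgents

client.agents.set_deployment(
    id="ag_1234567890",
    environment_id="env_1234567890",
    version_id=latest.version_id,  # assumed attribute on the version item
)

# Inspect every Environment and its deployed version for this Agent.
for env in client.agents.list_environments(id="ag_1234567890"):
    print(env)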
- - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.list_environments(id='ag_1234567890', ) - """ - _response = self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Activate and deactivate Evaluators for monitoring the Agent. - - An activated Evaluator will automatically be run on all new Logs - within the Agent for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], ) - """ - _response = self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> str: - """ - Serialize an Agent to the .agent file format. - - Useful for storing the Agent with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - str - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.serialize(id='id', ) - """ - _response = self._raw_client.serialize( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest: - """ - Deserialize an Agent from the .agent file format. 
- - This returns a subset of the attributes required by an Agent. - This subset defines the Agent version (e.g. `model`, `temperature`, etc.). - - Parameters - ---------- - agent : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentKernelRequest - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.agents.deserialize(agent='agent', ) - """ - _response = self._raw_client.deserialize(agent=agent, request_options=request_options) - return _response.data - - - class AsyncAgentsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawAgentsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawAgentsClient - """ - return self._raw_client - - async def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agent_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateAgentLogResponse: - """ - Create an Agent Log. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` - in order to trigger Evaluators. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to.
- - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log with. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentLogRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log.
Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agent_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreateAgentLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object' - , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}} - , 'additionalProperties': False - , 'required': ['output'] - }, 'strict': True}, 'on_agent_call': "stop"}]}, ) - asyncio.run(main())
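- # A sketch of the incomplete -> complete flow described above (IDs are illustrative): - # create the Log with `log_status="incomplete"` and a known `log_id`, then mark it - # `complete` later via `update_log` so monitoring Evaluators are triggered. - # await client.agents.log(path='Banking/Teller Agent', log_id='log_1234567890', log_status="incomplete", ) - # await client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', log_status="complete", )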
- """ - _response = await self._raw_client.log( - version_id=version_id, - environment=environment, - run_id=run_id, - path=path, - id=id, - output_message=output_message, - prompt_tokens=prompt_tokens, - reasoning_tokens=reasoning_tokens, - output_tokens=output_tokens, - prompt_cost=prompt_cost, - output_cost=output_cost, - finish_reason=finish_reason, - messages=messages, - tool_choice=tool_choice, - agent=agent, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - agent_log_request_environment=agent_log_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - async def update_log( - self, - id: str, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentLogResponse: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - log_id : str - Unique identifier for the Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Agent. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Agent. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Agent Log. - - output : typing.Optional[str] - The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course! 
Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_log( - id, - log_id, - messages=messages, - output_message=output_message, - inputs=inputs, - output=output, - error=error, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - async def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[AgentCallStreamResponse]: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. 
The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallStreamRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Yields - ------ - typing.AsyncIterator[AgentCallStreamResponse] - - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.agents.call_stream() - async for chunk in response: - print(chunk) - asyncio.run(main()) - """ - async with self._raw_client.call_stream( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - agent=agent, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - agents_call_stream_request_environment=agents_call_stream_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - return_inputs=return_inputs, - include_trace_children=include_trace_children, - request_options=request_options, - ) as r: - async for data in r.data: - yield data - - async def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentCallResponse: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. 
This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. 
- - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentCallResponse - - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], ) - asyncio.run(main())
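- # To force a specific tool, `tool_choice` accepts the forced-function form documented - # above; a sketch (the tool name here is illustrative): - # await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': '...'}], tool_choice={'type': 'function', 'function': {'name': 'stop'}}, )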
- """ - _response = await self._raw_client.call( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - agent=agent, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - agents_call_request_environment=agents_call_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - return_inputs=return_inputs, - include_trace_children=include_trace_children, - request_options=request_options, - ) - return _response.data - - async def continue_call_stream( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[AgentContinueCallStreamResponse]: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original Log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.AsyncIterator[AgentContinueCallStreamResponse] - - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], ) - async for chunk in response: - print(chunk) - asyncio.run(main()) - """ - async with self._raw_client.continue_call_stream( - log_id=log_id, - messages=messages, - provider_api_keys=provider_api_keys, - include_trace_children=include_trace_children, - request_options=request_options, - ) as r: - async for data in r.data: - yield data - - async def continue_call( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentContinueCallResponse: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original Log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentContinueCallResponse - - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], ) - asyncio.run(main())
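- # Sketch of the halt/continue pattern described above (IDs illustrative; `run_my_tool` - # is a hypothetical external tool of your own): call the Agent, run the requested tool - # yourself, then pass its result back as a Tool message to resume: - # await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': '...'}], log_id='log_1234567890', ) - # result = run_my_tool() - # await client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': result, 'tool_call_id': 'tc_1234567890'}], )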
- """ - _response = await self._raw_client.continue_call( - log_id=log_id, - messages=messages, - provider_api_keys=provider_api_keys, - include_trace_children=include_trace_children, - request_options=request_options, - ) - return _response.data - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[AgentResponse]: - """ - Get a list of all Agents. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Agents to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Agent name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Agents by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncPager[AgentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.agents.list(size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - async def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[AgentRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[AgentRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_iterations: typing.Optional[int] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Create an Agent or update it with a new version if it already exists. - - Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and - tools determine the versions of the Agent. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Agent - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. 
- - template : typing.Optional[AgentRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[AgentRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. - - max_iterations : typing.Optional[int] - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - - version_name : typing.Optional[str] - Unique name for the Agent version. Each Agent can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - description : typing.Optional[str] - Description of the Agent. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this Agent. - - readme : typing.Optional[str] - Long description of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.upsert(path='Banking/Teller Agent', provider="anthropic", endpoint="chat", model='claude-3-7-sonnet-latest', reasoning_effort=1024, template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], max_iterations=3, tools=[{'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object' - , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}} - , 'additionalProperties': False - , 'required': ['output'] - }, 'strict': True}, 'on_agent_call': "stop"}], version_name='teller-agent-v1', version_description='Initial version', ) - asyncio.run(main()) - """ - _response = await self._raw_client.upsert( - model=model, - path=path, - id=id, - endpoint=endpoint, - template=template, - template_language=template_language, - provider=provider, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - other=other, - seed=seed, - response_format=response_format, - reasoning_effort=reasoning_effort, - tools=tools, - attributes=attributes, - max_iterations=max_iterations, - version_name=version_name, - version_description=version_description, - description=description, - tags=tags, - readme=readme, - request_options=request_options, - ) - return _response.data - - async def delete_agent_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options) - return _response.data - - async def patch_agent_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Update the name or description of the Agent version. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', ) - asyncio.run(main()) - """ - _response = await self._raw_client.patch_agent_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Retrieve the Agent with the given ID. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.get(id='ag_1234567890', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Agent with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.delete(id='ag_1234567890', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Move the Agent to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - path : typing.Optional[str] - Path of the Agent including the Agent name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Agent. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move the Agent to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.move(id='ag_1234567890', path='new directory/new name', ) - asyncio.run(main()) - """ - _response = await self._raw_client.move( - id, path=path, name=name, directory_id=directory_id, request_options=request_options - ) - return _response.data - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListAgents: - """ - Get a list of all the versions of an Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListAgents - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.list_versions(id='ag_1234567890', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AgentResponse: - """ - Deploy Agent to an Environment. - - Set the deployed version for the specified Environment. This Agent - will be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Agent from the Environment. - - Remove the deployed version for the specified Environment. This Agent - will no longer be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.remove_deployment(id='id', environment_id='environment_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.list_environments(id='ag_1234567890', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AgentResponse: - """ - Activate and deactivate Evaluators for monitoring the Agent. - - An activated Evaluator will automatically be run on all new Logs - within the Agent for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - async def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> str: - """ - Serialize an Agent to the .agent file format. - - Useful for storing the Agent with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. 
- - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - str - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.serialize(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.serialize( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def deserialize( - self, *, agent: str, request_options: typing.Optional[RequestOptions] = None - ) -> AgentKernelRequest: - """ - Deserialize an Agent from the .agent file format. - - This returns a subset of the attributes required by an Agent. - This subset defines the Agent version (e.g. `model`, `temperature`, etc.). - - Parameters - ---------- - agent : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AgentKernelRequest - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.agents.deserialize(agent='agent', ) - asyncio.run(main()) - """ - _response = await self._raw_client.deserialize(agent=agent, request_options=request_options) - return _response.data diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py deleted file mode 100644 index e577f8c2..00000000 --- a/src/humanloop/agents/raw_client.py +++ /dev/null @@ -1,4021 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import contextlib -import datetime as dt -import json -import typing -from json.decoder import JSONDecodeError - -import httpx_sse -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.provider_api_keys import ProviderApiKeysParams -from ..requests.response_format import ResponseFormatParams -from ..types.agent_call_response import AgentCallResponse -from ..types.agent_call_stream_response import AgentCallStreamResponse -from ..types.agent_continue_call_response import AgentContinueCallResponse -from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse -from ..types.agent_kernel_request import AgentKernelRequest -from ..types.agent_log_response import AgentLogResponse -from ..types.agent_response import AgentResponse -from ..types.create_agent_log_response import CreateAgentLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.http_validation_error import HttpValidationError -from ..types.list_agents import ListAgents -from ..types.log_status import LogStatus -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.paginated_data_agent_response import PaginatedDataAgentResponse -from ..types.sort_order import SortOrder -from ..types.template_language import TemplateLanguage -from .requests.agent_log_request_agent import AgentLogRequestAgentParams -from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams -from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams -from .requests.agent_request_stop import AgentRequestStopParams -from .requests.agent_request_template import AgentRequestTemplateParams -from .requests.agent_request_tools_item import AgentRequestToolsItemParams -from .requests.agents_call_request_agent import AgentsCallRequestAgentParams -from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams -from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams -from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
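The `OMIT` sentinel above ("used as the default value for optional parameters") is the Ellipsis singleton cast to `Any`. It lets the request builders distinguish "argument never passed" from an explicit `None`, which matters because many fields here are nullable. A minimal sketch of the pattern; `build_json` is a hypothetical stand-in for the SDK's internal handling of `omit=OMIT`, not part of this SDK:

import typing

OMIT = typing.cast(typing.Any, ...)  # the Ellipsis singleton

def build_json(**kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
    # Keys set to OMIT were never supplied by the caller and are dropped;
    # an explicit None survives into the request body ("unset this field").
    return {key: value for key, value in kwargs.items() if value is not OMIT}

assert build_json(output=None, error=OMIT) == {"output": None}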
- - -class RawAgentsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agent_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[CreateAgentLogResponse]: - """ - Create an Agent Log. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` - in order to trigger Evaluators. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output.
- - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': '<function_name>'}}` forces the model to use the named function. - - agent : typing.Optional[AgentLogRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agent_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log.
If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[CreateAgentLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "agents/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "run_id": run_id, - "path": path, - "id": id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write" - ), - "agent": convert_and_respect_annotation_metadata( - object_=agent, annotation=AgentLogRequestAgentParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": agent_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreateAgentLogResponse, - construct_type( - type_=CreateAgentLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_log( - self, - id: str, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentLogResponse]: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - log_id : str - Unique identifier for the Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Agent.
- - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Agent. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Agent Log. - - output : typing.Optional[str] - The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[AgentLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "inputs": inputs, - "output": output, - "error": error, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentLogResponse, - construct_type( - type_=AgentLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - @contextlib.contextmanager - def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children:
typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': '<function_name>'}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallStreamRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]] - - """ - with self._client_wrapper.httpx_client.stream( - "agents/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write" - ), - "agent": convert_and_respect_annotation_metadata( - object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": agents_call_stream_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "return_inputs": return_inputs, - "include_trace_children": include_trace_children, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) as _response: - - def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]: - try: - if 200 <= _response.status_code < 300: - - def _iter(): - _event_source = httpx_sse.EventSource(_response) - for _sse in _event_source.iter_sse(): - if _sse.data is None: - return - try: - yield typing.cast( - AgentCallStreamResponse, - construct_type( - type_=AgentCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except Exception: - pass - return - - return HttpResponse(response=_response,
data=_iter()) - _response.read() - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError( - status_code=_response.status_code, headers=dict(_response.headers), body=_response.text - ) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - yield stream() - - def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentCallResponse]: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] - Controls how the model uses tools.
The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': '<function_name>'}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - HttpResponse[AgentCallResponse] - - """ - _response = self._client_wrapper.httpx_client.request( - "agents/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" - ), - "agent": convert_and_respect_annotation_metadata( - object_=agent, annotation=AgentsCallRequestAgentParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": agents_call_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "return_inputs": return_inputs, - "include_trace_children": include_trace_children, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentCallResponse, - construct_type( - type_=AgentCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - @contextlib.contextmanager - def continue_call_stream( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call, by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. 
If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]] - - """ - with self._client_wrapper.httpx_client.stream( - "agents/continue", - method="POST", - json={ - "log_id": log_id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "include_trace_children": include_trace_children, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) as _response: - - def stream() -> HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]: - try: - if 200 <= _response.status_code < 300: - - def _iter(): - _event_source = httpx_sse.EventSource(_response) - for _sse in _event_source.iter_sse(): - if _sse.data is None: - return - try: - yield typing.cast( - AgentContinueCallStreamResponse, - construct_type( - type_=AgentContinueCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except Exception: - pass - return - - return HttpResponse(response=_response, data=_iter()) - _response.read() - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError( - status_code=_response.status_code, headers=dict(_response.headers), body=_response.text - ) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - yield stream() - - def continue_call( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentContinueCallResponse]: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call, by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[AgentContinueCallResponse] - - """ - _response = self._client_wrapper.httpx_client.request( - "agents/continue", - method="POST", - json={ - "log_id": log_id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "include_trace_children": include_trace_children, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentContinueCallResponse, - construct_type( - type_=AgentContinueCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[AgentResponse]: - """ - Get a list of all Agents. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Agents to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Agent name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Agents by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SyncPager[AgentResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "agents", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataAgentResponse, - construct_type( - type_=PaginatedDataAgentResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[AgentRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[AgentRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_iterations: typing.Optional[int] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentResponse]: - """ - Create an Agent or update it with a new version if it already exists. - - Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and - tools determine the versions of the Agent. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Agent - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[AgentRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[AgentRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. - - max_iterations : typing.Optional[int] - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - - version_name : typing.Optional[str] - Unique name for the Agent version.
Each Agent can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - description : typing.Optional[str] - Description of the Agent. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this Agent. - - readme : typing.Optional[str] - Long description of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[AgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "agents", - method="POST", - json={ - "path": path, - "id": id, - "model": model, - "endpoint": endpoint, - "template": convert_and_respect_annotation_metadata( - object_=template, annotation=AgentRequestTemplateParams, direction="write" - ), - "template_language": template_language, - "provider": provider, - "max_tokens": max_tokens, - "temperature": temperature, - "top_p": top_p, - "stop": convert_and_respect_annotation_metadata( - object_=stop, annotation=AgentRequestStopParams, direction="write" - ), - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "other": other, - "seed": seed, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormatParams, direction="write" - ), - "reasoning_effort": convert_and_respect_annotation_metadata( - object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write" - ), - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" - ), - "attributes": attributes, - "max_iterations": max_iterations, - "version_name": version_name, - "version_description": version_description, - "description": description, - "tags": tags, - "readme": readme, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_agent_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Delete a version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def patch_agent_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentResponse]: - """ - Update the name or description of the Agent version. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[AgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentResponse]: - """ - Retrieve the Agent with the given ID. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[AgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Agent with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentResponse]: - """ - Move the Agent to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - path : typing.Optional[str] - Path of the Agent including the Agent name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Agent. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move the Agent to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - HttpResponse[AgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - "directory_id": directory_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ListAgents]: - """ - Get a list of all the versions of an Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ListAgents] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListAgents, - construct_type( - type_=ListAgents, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[AgentResponse]: - """ - Deploy Agent to an Environment. - - Set the deployed version for the specified Environment. This Agent - will be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
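# [Editor's note] Deploying a specific Agent version to an Environment, per the
# parameters above. The "env_..." and "agv_..." identifiers are hypothetical.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
client.agents.set_deployment(id="ag_...", environment_id="env_...", version_id="agv_...")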
- - Returns - ------- - HttpResponse[AgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove deployed Agent from the Environment. - - Remove the deployed version for the specified Environment. This Agent - will no longer be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
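# [Editor's note] Listing each Environment and its deployed version for an Agent,
# assuming the `Humanloop` client shape used in the sketches above.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
for deployment in client.agents.list_environments(id="ag_..."):
    print(deployment)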
- - Returns - ------- - HttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[AgentResponse]: - """ - Activate and deactivate Evaluators for monitoring the Agent. - - An activated Evaluator will automatically be run on all new Logs - within the Agent for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
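# [Editor's note] Activating a monitoring Evaluator on an Agent. The payload shape
# is an assumption inferred from the
# `EvaluatorActivationDeactivationRequestActivateItemParams` annotation above;
# "evv_..." is a hypothetical Evaluator Version ID.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
client.agents.update_monitoring(
    id="ag_...",
    activate=[{"evaluator_version_id": "evv_..."}],  # assumed item shape
)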
- - Returns - ------- - HttpResponse[AgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[str]: - """ - Serialize an Agent to the .agent file format. - - Useful for storing the Agent with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[str] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/serialize", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=_response.text) # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def deserialize( - self, *, agent: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[AgentKernelRequest]: - """ - Deserialize an Agent from the .agent file format. 
- - This returns a subset of the attributes required by an Agent. - This subset is the bit that defines the Agent version (e.g. with `model` and `temperature` etc) - - Parameters - ---------- - agent : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[AgentKernelRequest] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "agents/deserialize", - method="POST", - json={ - "agent": agent, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentKernelRequest, - construct_type( - type_=AgentKernelRequest, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawAgentsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agent_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[CreateAgentLogResponse]: - """ - Create an Agent Log. 
- - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` - in order to trigger Evaluators. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentLogRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider.
- - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agent_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[CreateAgentLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "agents/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "run_id": run_id, - "path": path, - "id": id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write" - ), - "agent": convert_and_respect_annotation_metadata( - object_=agent, annotation=AgentLogRequestAgentParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": agent_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( -
CreateAgentLogResponse, - construct_type( - type_=CreateAgentLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_log( - self, - id: str, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentLogResponse]: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - log_id : str - Unique identifier for the Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Agent. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Agent. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Agent Log. - - output : typing.Optional[str] - The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
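# [Editor's note] Completing an `incomplete` Agent Log with the async client; the
# `AsyncHumanloop` name is assumed to mirror the sync `Humanloop` client, and the
# IDs are hypothetical.
import asyncio
from humanloop import AsyncHumanloop

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Marking the Log `complete` lets monitoring Evaluators run on it.
    await client.agents.update_log(id="ag_...", log_id="log_...", log_status="complete")

asyncio.run(main())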
- - Returns - ------- - AsyncHttpResponse[AgentLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "inputs": inputs, - "output": output, - "error": error, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentLogResponse, - construct_type( - type_=AgentLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - @contextlib.asynccontextmanager - async def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- - Instead of targeting an existing version explicitly, you can pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallStreamRequestAgentParams] - The Agent configuration to use. Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]] - - """ - async with self._client_wrapper.httpx_client.stream( - "agents/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write" - ), - "agent": convert_and_respect_annotation_metadata( - object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": agents_call_stream_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "return_inputs": return_inputs, - "include_trace_children": include_trace_children, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) as _response: - - async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]: - try: - if 200 <= _response.status_code < 300: - - async def _iter(): - _event_source = httpx_sse.EventSource(_response) - async for _sse in _event_source.aiter_sse(): - if _sse.data == None: - return - try: - yield typing.cast( - AgentCallStreamResponse, - construct_type( - type_=AgentCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except Exception: - pass - return - - return AsyncHttpResponse(response=_response, data=_iter()) - await _response.aread() - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError( - status_code=_response.status_code, headers=dict(_response.headers), body=_response.text - ) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - yield await stream() - - async def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, 
- messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, - agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - agents_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentCallResponse]: - """ - Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. - - If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, - pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. - - The Agent will run for the maximum number of iterations, or until it encounters a stop condition, - according to its configuration. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Agent. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Agent details in the request body. A new version is created if it does not match - any existing ones. This is helpful in the case where you are storing or deriving - your Agent details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Agent to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - agent : typing.Optional[AgentsCallRequestAgentParams] - The Agent configuration to use.
Two formats are supported: - - An object representing the details of the Agent configuration - - A string representing the raw contents of a .agent file - A new Agent version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - agents_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
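# [Editor's note] A non-streaming Agent call with the async client, assuming
# `AsyncHumanloop` as in the earlier sketches; the ID is hypothetical and the
# message follows the dict form that ChatMessageParams accepts.
import asyncio
from humanloop import AsyncHumanloop

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    response = await client.agents.call(
        id="ag_...",
        messages=[{"role": "user", "content": "Summarise today's tickets."}],
    )
    print(response)

asyncio.run(main())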
- - Returns - ------- - AsyncHttpResponse[AgentCallResponse] - - """ - _response = await self._client_wrapper.httpx_client.request( - "agents/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" - ), - "agent": convert_and_respect_annotation_metadata( - object_=agent, annotation=AgentsCallRequestAgentParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": agents_call_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "return_inputs": return_inputs, - "include_trace_children": include_trace_children, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentCallResponse, - construct_type( - type_=AgentCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - @contextlib.asynccontextmanager - async def continue_call_stream( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call, by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. 
If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]] - - """ - async with self._client_wrapper.httpx_client.stream( - "agents/continue", - method="POST", - json={ - "log_id": log_id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "include_trace_children": include_trace_children, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) as _response: - - async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]: - try: - if 200 <= _response.status_code < 300: - - async def _iter(): - _event_source = httpx_sse.EventSource(_response) - async for _sse in _event_source.aiter_sse(): - if _sse.data == None: - return - try: - yield typing.cast( - AgentContinueCallStreamResponse, - construct_type( - type_=AgentContinueCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except Exception: - pass - return - - return AsyncHttpResponse(response=_response, data=_iter()) - await _response.aread() - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError( - status_code=_response.status_code, headers=dict(_response.headers), body=_response.text - ) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - yield await stream() - - async def continue_call( - self, - *, - log_id: str, - messages: typing.Sequence[ChatMessageParams], - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - include_trace_children: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentContinueCallResponse]: - """ - Continue an incomplete Agent call. - - This endpoint allows continuing an existing incomplete Agent call, by passing the tool call - requested by the Agent. The Agent will resume processing from where it left off. - - The messages in the request will be appended to the original messages in the Log. You do not - have to provide the previous conversation history. - - The original log must be in an incomplete state to be continued. - - Parameters - ---------- - log_id : str - This identifies the Agent Log to continue. - - messages : typing.Sequence[ChatMessageParams] - The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. 
If not specified here, Humanloop will fall back to the key saved to your organization. - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the returned Agent Log. Defaults to false. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[AgentContinueCallResponse] - - """ - _response = await self._client_wrapper.httpx_client.request( - "agents/continue", - method="POST", - json={ - "log_id": log_id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "include_trace_children": include_trace_children, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentContinueCallResponse, - construct_type( - type_=AgentContinueCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[AgentResponse]: - """ - Get a list of all Agents. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Agents to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Agent name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Agents by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
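# [Editor's note] Paging through Agents with the async client. The pattern
# (await the call, then `async for` over the returned AsyncPager) is assumed from
# the AsyncPager[AgentResponse] return type documented above.
import asyncio
from humanloop import AsyncHumanloop

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    pager = await client.agents.list(size=50)
    async for agent in pager:
        print(agent.id)

asyncio.run(main())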
- - Returns - ------- - AsyncPager[AgentResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "agents", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataAgentResponse, - construct_type( - type_=PaginatedDataAgentResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[AgentRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[AgentRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_iterations: typing.Optional[int] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentResponse]: - """ - Create an Agent or update it with a new version if it already exists. - - Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and - tools determine the versions of the Agent. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Agent - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Agent. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[AgentRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[AgentRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] - Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. - - max_iterations : typing.Optional[int] - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - - version_name : typing.Optional[str] - Unique name for the Agent version.
Each Agent can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - description : typing.Optional[str] - Description of the Agent. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this Agent. - - readme : typing.Optional[str] - Long description of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[AgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "agents", - method="POST", - json={ - "path": path, - "id": id, - "model": model, - "endpoint": endpoint, - "template": convert_and_respect_annotation_metadata( - object_=template, annotation=AgentRequestTemplateParams, direction="write" - ), - "template_language": template_language, - "provider": provider, - "max_tokens": max_tokens, - "temperature": temperature, - "top_p": top_p, - "stop": convert_and_respect_annotation_metadata( - object_=stop, annotation=AgentRequestStopParams, direction="write" - ), - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "other": other, - "seed": seed, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormatParams, direction="write" - ), - "reasoning_effort": convert_and_respect_annotation_metadata( - object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write" - ), - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" - ), - "attributes": attributes, - "max_iterations": max_iterations, - "version_name": version_name, - "version_description": version_description, - "description": description, - "tags": tags, - "readme": readme, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_agent_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete a version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def patch_agent_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentResponse]: - """ - Update the name or description of the Agent version. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : str - Unique identifier for the specific version of the Agent. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[AgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentResponse]: - """ - Retrieve the Agent with the given ID. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[AgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Agent with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentResponse]: - """ - Move the Agent to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - path : typing.Optional[str] - Path of the Agent including the Agent name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Agent. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move the Agent to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncHttpResponse[AgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - "directory_id": directory_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ListAgents]: - """ - Get a list of all the versions of an Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[ListAgents] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListAgents, - construct_type( - type_=ListAgents, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[AgentResponse]: - """ - Deploy Agent to an Environment. - - Set the deployed version for the specified Environment. This Agent - will be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncHttpResponse[AgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove deployed Agent from the Environment. - - Remove the deployed version for the specified Environment. This Agent - will no longer be used for calls made to the Agent in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[AgentResponse]: - """ - Activate and deactivate Evaluators for monitoring the Agent. - - An activated Evaluator will automatically be run on all new Logs - within the Agent for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[AgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentResponse, - construct_type( - type_=AgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[str]: - """ - Serialize an Agent to the .agent file format. - - Useful for storing the Agent with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Agent is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Agent. - - Parameters - ---------- - id : str - Unique identifier for Agent. - - version_id : typing.Optional[str] - A specific Version ID of the Agent to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[str] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"agents/{jsonable_encoder(id)}/serialize", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=_response.text) # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def deserialize( - self, *, agent: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[AgentKernelRequest]: - """ - Deserialize an Agent from the .agent file format. - - This returns a subset of the attributes required by an Agent. - This subset comprises the attributes that define the Agent version (e.g. `model`, `temperature`, etc.). - - Parameters - ---------- - agent : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[AgentKernelRequest] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "agents/deserialize", - method="POST", - json={ - "agent": agent, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - AgentKernelRequest, - construct_type( - type_=AgentKernelRequest, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py deleted file mode 100644 index e02cfc67..00000000 --- a/src/humanloop/agents/requests/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
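Taken together, the raw methods above compose into a simple lifecycle. A minimal sketch of the equivalent high-level async workflow follows; it is not taken from the SDK's own examples, and the path, model, template, and version name are placeholder values:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    # Upsert: creates the Agent, or adds a new version if it already exists.
    agent = await client.agents.upsert(
        path="demo/support-agent",  # placeholder path
        model="gpt-4o",
        template=[{"role": "system", "content": "You are a helpful agent."}],
        version_name="v1",
    )

    # List: the pager fetches subsequent pages lazily as you iterate.
    pager = await client.agents.list(size=10)
    async for item in pager:
        print(item.id, item.path)

    # Round-trip one version through the .agent file format.
    serialized = await client.agents.serialize(id=agent.id)
    kernel = await client.agents.deserialize(agent=serialized)
    print(kernel.model)


asyncio.run(main())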
- -# isort: skip_file - -from .agent_log_request_agent import AgentLogRequestAgentParams -from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams -from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams -from .agent_request_stop import AgentRequestStopParams -from .agent_request_template import AgentRequestTemplateParams -from .agent_request_tools_item import AgentRequestToolsItemParams -from .agents_call_request_agent import AgentsCallRequestAgentParams -from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams -from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams -from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams - -__all__ = [ - "AgentLogRequestAgentParams", - "AgentLogRequestToolChoiceParams", - "AgentRequestReasoningEffortParams", - "AgentRequestStopParams", - "AgentRequestTemplateParams", - "AgentRequestToolsItemParams", - "AgentsCallRequestAgentParams", - "AgentsCallRequestToolChoiceParams", - "AgentsCallStreamRequestAgentParams", - "AgentsCallStreamRequestToolChoiceParams", -] diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py deleted file mode 100644 index 38a5adc4..00000000 --- a/src/humanloop/agents/requests/agent_log_request_agent.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.agent_kernel_request import AgentKernelRequestParams - -AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str] diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py deleted file mode 100644 index 02255e30..00000000 --- a/src/humanloop/agents/requests/agent_log_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.tool_choice import ToolChoiceParams - -AgentLogRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py deleted file mode 100644 index dfc8de95..00000000 --- a/src/humanloop/agents/requests/agent_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort - -AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py deleted file mode 100644 index 3970451c..00000000 --- a/src/humanloop/agents/requests/agent_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py deleted file mode 100644 index 3b9c8c1f..00000000 --- a/src/humanloop/agents/requests/agent_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
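A note on how these Params aliases behave in practice: they are thin typing.Union definitions rather than wrapper classes, so callers pass any union member directly. A small sketch, where StopParams is a local restatement of AgentRequestStopParams rather than an SDK import:

import typing

StopParams = typing.Union[str, typing.Sequence[str]]


def normalize_stop(stop: StopParams) -> typing.List[str]:
    # Both a bare string and any sequence of strings satisfy the alias.
    return [stop] if isinstance(stop, str) else list(stop)


print(normalize_stop("END"))            # -> ['END']
print(normalize_stop(("END", "STOP")))  # -> ['END', 'STOP']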
- -import typing - -from ...requests.chat_message import ChatMessageParams - -AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py deleted file mode 100644 index 3bf06108..00000000 --- a/src/humanloop/agents/requests/agent_request_tools_item.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.agent_inline_tool import AgentInlineToolParams -from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams - -AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py deleted file mode 100644 index 0123488f..00000000 --- a/src/humanloop/agents/requests/agents_call_request_agent.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.agent_kernel_request import AgentKernelRequestParams - -AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str] diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py deleted file mode 100644 index 9ebb0f75..00000000 --- a/src/humanloop/agents/requests/agents_call_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.tool_choice import ToolChoiceParams - -AgentsCallRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py deleted file mode 100644 index eab2c55c..00000000 --- a/src/humanloop/agents/requests/agents_call_stream_request_agent.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.agent_kernel_request import AgentKernelRequestParams - -AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str] diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py deleted file mode 100644 index 40ad08c2..00000000 --- a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.tool_choice import ToolChoiceParams - -AgentsCallStreamRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py deleted file mode 100644 index 0d9bf871..00000000 --- a/src/humanloop/agents/types/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
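The tool-choice aliases above union three string literals with a structured ToolChoiceParams object, so downstream code can discriminate with a plain isinstance check. A hedged sketch, where ToolChoiceDict is a hypothetical stand-in for ToolChoiceParams:

import typing


class ToolChoiceDict(typing.TypedDict):
    # Hypothetical shape standing in for ToolChoiceParams.
    type: str
    function: typing.Dict[str, str]


ToolChoiceLike = typing.Union[
    typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceDict
]


def render(choice: ToolChoiceLike) -> str:
    if isinstance(choice, str):
        return f"mode={choice}"  # one of the three literal modes
    return f"forced tool={choice['function']['name']}"  # explicit tool selection


print(render("auto"))
print(render({"type": "function", "function": {"name": "search"}}))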
- -# isort: skip_file - -from .agent_log_request_agent import AgentLogRequestAgent -from .agent_log_request_tool_choice import AgentLogRequestToolChoice -from .agent_request_reasoning_effort import AgentRequestReasoningEffort -from .agent_request_stop import AgentRequestStop -from .agent_request_template import AgentRequestTemplate -from .agent_request_tools_item import AgentRequestToolsItem -from .agents_call_request_agent import AgentsCallRequestAgent -from .agents_call_request_tool_choice import AgentsCallRequestToolChoice -from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent -from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice - -__all__ = [ - "AgentLogRequestAgent", - "AgentLogRequestToolChoice", - "AgentRequestReasoningEffort", - "AgentRequestStop", - "AgentRequestTemplate", - "AgentRequestToolsItem", - "AgentsCallRequestAgent", - "AgentsCallRequestToolChoice", - "AgentsCallStreamRequestAgent", - "AgentsCallStreamRequestToolChoice", -] diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py deleted file mode 100644 index b0e52d93..00000000 --- a/src/humanloop/agents/types/agent_log_request_agent.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.agent_kernel_request import AgentKernelRequest - -AgentLogRequestAgent = typing.Union[AgentKernelRequest, str] diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py deleted file mode 100644 index b1d79f3a..00000000 --- a/src/humanloop/agents/types/agent_log_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.tool_choice import ToolChoice - -AgentLogRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py deleted file mode 100644 index 3af67155..00000000 --- a/src/humanloop/agents/types/agent_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort - -AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py deleted file mode 100644 index 325a6b2e..00000000 --- a/src/humanloop/agents/types/agent_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py deleted file mode 100644 index c4da3e69..00000000 --- a/src/humanloop/agents/types/agent_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
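Note the deliberate asymmetry between the two directories: the requests/ aliases use typing.Sequence where the types/ aliases here use typing.List. A plausible reading is that inputs accept any read-only sequence while parsed responses are concrete lists; illustrated with local restatements of the two stop aliases:

import typing

# Request side (requests/): Sequence, so tuples type-check as inputs.
StopParams = typing.Union[str, typing.Sequence[str]]
# Response side (types/): List, since parsed payloads are concrete lists.
Stop = typing.Union[str, typing.List[str]]


def send(stop: StopParams) -> None:
    print("sending stop:", stop)


send(("END", "STOP"))  # a tuple satisfies Sequence[str] but not List[str]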
- -import typing - -from ...types.chat_message import ChatMessage - -AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py deleted file mode 100644 index a43d160e..00000000 --- a/src/humanloop/agents/types/agent_request_tools_item.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.agent_inline_tool import AgentInlineTool -from ...types.agent_linked_file_request import AgentLinkedFileRequest - -AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py deleted file mode 100644 index 5cfbc669..00000000 --- a/src/humanloop/agents/types/agents_call_request_agent.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.agent_kernel_request import AgentKernelRequest - -AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str] diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py deleted file mode 100644 index aee291c9..00000000 --- a/src/humanloop/agents/types/agents_call_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.tool_choice import ToolChoice - -AgentsCallRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py deleted file mode 100644 index c803d804..00000000 --- a/src/humanloop/agents/types/agents_call_stream_request_agent.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.agent_kernel_request import AgentKernelRequest - -AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str] diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py deleted file mode 100644 index 9e636efa..00000000 --- a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.tool_choice import ToolChoice - -AgentsCallStreamRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py deleted file mode 100644 index 2234d799..00000000 --- a/src/humanloop/base_client.py +++ /dev/null @@ -1,170 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
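Closing out the agents type aliases: AgentRequestToolsItem unions two model classes (a linked File versus an inline tool), so consumers can discriminate members with isinstance. A sketch using hypothetical dataclass stand-ins for the real Pydantic models:

import dataclasses
import typing


@dataclasses.dataclass
class LinkedFile:  # hypothetical stand-in for AgentLinkedFileRequest
    file_id: str


@dataclasses.dataclass
class InlineTool:  # hypothetical stand-in for AgentInlineTool
    name: str


ToolsItem = typing.Union[LinkedFile, InlineTool]


def describe(tool: ToolsItem) -> str:
    # isinstance is enough to discriminate, since both members are classes.
    if isinstance(tool, LinkedFile):
        return f"linked file {tool.file_id}"
    return f"inline tool {tool.name}"


print(describe(LinkedFile(file_id="fl_123")))
print(describe(InlineTool(name="search")))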
- -import os -import typing - -import httpx -from .agents.client import AgentsClient, AsyncAgentsClient -from .core.api_error import ApiError -from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from .datasets.client import AsyncDatasetsClient, DatasetsClient -from .directories.client import AsyncDirectoriesClient, DirectoriesClient -from .environment import HumanloopEnvironment -from .evaluations.client import AsyncEvaluationsClient, EvaluationsClient -from .evaluators.client import AsyncEvaluatorsClient, EvaluatorsClient -from .files.client import AsyncFilesClient, FilesClient -from .flows.client import AsyncFlowsClient, FlowsClient -from .logs.client import AsyncLogsClient, LogsClient -from .prompts.client import AsyncPromptsClient, PromptsClient -from .tools.client import AsyncToolsClient, ToolsClient - - -class BaseHumanloop: - """ - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configurations that will propagate to these functions. - - Parameters - ---------- - base_url : typing.Optional[str] - The base URL to use for requests from the client. - - environment : HumanloopEnvironment - The environment to use for requests from the client (import with `from .environment import HumanloopEnvironment`). - - Defaults to HumanloopEnvironment.DEFAULT - - - - api_key : typing.Optional[str] - timeout : typing.Optional[float] - The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. - - follow_redirects : typing.Optional[bool] - Whether the default httpx client follows redirects or not; this is irrelevant if a custom httpx client is passed in. - - httpx_client : typing.Optional[httpx.Client] - The httpx client to use for making requests. A preconfigured client is used by default; this is useful should you want to pass in any custom httpx configuration.
- - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - """ - - def __init__( - self, - *, - base_url: typing.Optional[str] = None, - environment: HumanloopEnvironment = HumanloopEnvironment.DEFAULT, - api_key: typing.Optional[str] = os.getenv("HUMANLOOP_API_KEY"), - timeout: typing.Optional[float] = None, - follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.Client] = None, - ): - _defaulted_timeout = ( - timeout if timeout is not None else 60 if httpx_client is None else httpx_client.timeout.read - ) - if api_key is None: - raise ApiError( - body="The client must be instantiated by either passing in api_key or setting HUMANLOOP_API_KEY" - ) - self._client_wrapper = SyncClientWrapper( - base_url=_get_base_url(base_url=base_url, environment=environment), - api_key=api_key, - httpx_client=httpx_client - if httpx_client is not None - else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects) - if follow_redirects is not None - else httpx.Client(timeout=_defaulted_timeout), - timeout=_defaulted_timeout, - ) - self.prompts = PromptsClient(client_wrapper=self._client_wrapper) - self.tools = ToolsClient(client_wrapper=self._client_wrapper) - self.datasets = DatasetsClient(client_wrapper=self._client_wrapper) - self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper) - self.flows = FlowsClient(client_wrapper=self._client_wrapper) - self.agents = AgentsClient(client_wrapper=self._client_wrapper) - self.directories = DirectoriesClient(client_wrapper=self._client_wrapper) - self.files = FilesClient(client_wrapper=self._client_wrapper) - self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper) - self.logs = LogsClient(client_wrapper=self._client_wrapper) - - -class AsyncBaseHumanloop: - """ - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configurations that will propagate to these functions. - - Parameters - ---------- - base_url : typing.Optional[str] - The base URL to use for requests from the client. - - environment : HumanloopEnvironment - The environment to use for requests from the client (import with `from .environment import HumanloopEnvironment`). - - Defaults to HumanloopEnvironment.DEFAULT - - - - api_key : typing.Optional[str] - timeout : typing.Optional[float] - The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. - - follow_redirects : typing.Optional[bool] - Whether the default httpx client follows redirects or not; this is irrelevant if a custom httpx client is passed in. - - httpx_client : typing.Optional[httpx.AsyncClient] - The httpx client to use for making requests. A preconfigured client is used by default; this is useful should you want to pass in any custom httpx configuration.
- - Examples - -------- - from humanloop import AsyncHumanloop - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - """ - - def __init__( - self, - *, - base_url: typing.Optional[str] = None, - environment: HumanloopEnvironment = HumanloopEnvironment.DEFAULT, - api_key: typing.Optional[str] = os.getenv("HUMANLOOP_API_KEY"), - timeout: typing.Optional[float] = None, - follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.AsyncClient] = None, - ): - _defaulted_timeout = ( - timeout if timeout is not None else 60 if httpx_client is None else httpx_client.timeout.read - ) - if api_key is None: - raise ApiError( - body="The client must be instantiated by either passing in api_key or setting HUMANLOOP_API_KEY" - ) - self._client_wrapper = AsyncClientWrapper( - base_url=_get_base_url(base_url=base_url, environment=environment), - api_key=api_key, - httpx_client=httpx_client - if httpx_client is not None - else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) - if follow_redirects is not None - else httpx.AsyncClient(timeout=_defaulted_timeout), - timeout=_defaulted_timeout, - ) - self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper) - self.tools = AsyncToolsClient(client_wrapper=self._client_wrapper) - self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper) - self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper) - self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper) - self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper) - self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper) - self.files = AsyncFilesClient(client_wrapper=self._client_wrapper) - self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper) - self.logs = AsyncLogsClient(client_wrapper=self._client_wrapper) - - -def _get_base_url(*, base_url: typing.Optional[str] = None, environment: HumanloopEnvironment) -> str: - if base_url is not None: - return base_url - elif environment is not None: - return environment.value - else: - raise Exception("Please pass in either base_url or environment to construct the client") diff --git a/src/humanloop/core/__init__.py b/src/humanloop/core/__init__.py deleted file mode 100644 index 48f3afaa..00000000 --- a/src/humanloop/core/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
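Before moving on to the core helpers, a sketch of how the constructor logic above plays out in practice; the base URL string is illustrative, not taken from this diff:

import httpx

from humanloop import Humanloop

# Defaults: 60s timeout, redirects followed, base URL from HumanloopEnvironment.DEFAULT.
client = Humanloop(api_key="YOUR_API_KEY")

# With a custom httpx client, the SDK adopts that client's own timeout instead of 60s.
tuned = Humanloop(
    api_key="YOUR_API_KEY",
    httpx_client=httpx.Client(timeout=httpx.Timeout(10.0), follow_redirects=True),
)

# An explicit base_url wins over environment inside _get_base_url.
pinned = Humanloop(api_key="YOUR_API_KEY", base_url="https://example.humanloop.test/v5")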
- -# isort: skip_file - -from .api_error import ApiError -from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper -from .datetime_utils import serialize_datetime -from .file import File, convert_file_dict_to_httpx_tuples, with_content_type -from .http_client import AsyncHttpClient, HttpClient -from .http_response import AsyncHttpResponse, HttpResponse -from .jsonable_encoder import jsonable_encoder -from .pagination import AsyncPager, SyncPager -from .pydantic_utilities import ( - IS_PYDANTIC_V2, - UniversalBaseModel, - UniversalRootModel, - parse_obj_as, - universal_field_validator, - universal_root_validator, - update_forward_refs, -) -from .query_encoder import encode_query -from .remove_none_from_dict import remove_none_from_dict -from .request_options import RequestOptions -from .serialization import FieldMetadata, convert_and_respect_annotation_metadata -from .unchecked_base_model import UncheckedBaseModel, UnionMetadata, construct_type - -__all__ = [ - "ApiError", - "AsyncClientWrapper", - "AsyncHttpClient", - "AsyncHttpResponse", - "AsyncPager", - "BaseClientWrapper", - "FieldMetadata", - "File", - "HttpClient", - "HttpResponse", - "IS_PYDANTIC_V2", - "RequestOptions", - "SyncClientWrapper", - "SyncPager", - "UncheckedBaseModel", - "UnionMetadata", - "UniversalBaseModel", - "UniversalRootModel", - "construct_type", - "convert_and_respect_annotation_metadata", - "convert_file_dict_to_httpx_tuples", - "encode_query", - "jsonable_encoder", - "parse_obj_as", - "remove_none_from_dict", - "serialize_datetime", - "universal_field_validator", - "universal_root_validator", - "update_forward_refs", - "with_content_type", -] diff --git a/src/humanloop/core/api_error.py b/src/humanloop/core/api_error.py deleted file mode 100644 index 6f850a60..00000000 --- a/src/humanloop/core/api_error.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from typing import Any, Dict, Optional - - -class ApiError(Exception): - headers: Optional[Dict[str, str]] - status_code: Optional[int] - body: Any - - def __init__( - self, - *, - headers: Optional[Dict[str, str]] = None, - status_code: Optional[int] = None, - body: Any = None, - ) -> None: - self.headers = headers - self.status_code = status_code - self.body = body - - def __str__(self) -> str: - return f"headers: {self.headers}, status_code: {self.status_code}, body: {self.body}" diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py deleted file mode 100644 index a71ca52c..00000000 --- a/src/humanloop/core/client_wrapper.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
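Since every generated endpoint funnels failures through ApiError, callers get one catch-all hook. A minimal sketch, with a placeholder Agent ID:

from humanloop import Humanloop
from humanloop.core import ApiError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.agents.get(id="ag_does_not_exist")  # placeholder ID
except ApiError as err:
    # 422s arrive as the UnprocessableEntityError subclass raised in the
    # clients above; anything else lands here with the raw status and body.
    print(err.status_code, err.headers, err.body)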
- -import typing - -import httpx -from .http_client import AsyncHttpClient, HttpClient - - -class BaseClientWrapper: - def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None): - self.api_key = api_key - self._base_url = base_url - self._timeout = timeout - - def get_headers(self) -> typing.Dict[str, str]: - headers: typing.Dict[str, str] = { - "User-Agent": "humanloop/0.8.39", - "X-Fern-Language": "Python", - "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.39", - } - headers["X-API-KEY"] = self.api_key - return headers - - def get_base_url(self) -> str: - return self._base_url - - def get_timeout(self) -> typing.Optional[float]: - return self._timeout - - -class SyncClientWrapper(BaseClientWrapper): - def __init__( - self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.Client - ): - super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) - self.httpx_client = HttpClient( - httpx_client=httpx_client, - base_headers=self.get_headers, - base_timeout=self.get_timeout, - base_url=self.get_base_url, - ) - - -class AsyncClientWrapper(BaseClientWrapper): - def __init__( - self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.AsyncClient - ): - super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) - self.httpx_client = AsyncHttpClient( - httpx_client=httpx_client, - base_headers=self.get_headers, - base_timeout=self.get_timeout, - base_url=self.get_base_url, - ) diff --git a/src/humanloop/core/datetime_utils.py b/src/humanloop/core/datetime_utils.py deleted file mode 100644 index 7c9864a9..00000000 --- a/src/humanloop/core/datetime_utils.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - - -def serialize_datetime(v: dt.datetime) -> str: - """ - Serialize a datetime including timezone info. - - Uses the timezone info provided if present, otherwise uses the current runtime's timezone info. - - UTC datetimes end in "Z" while all other timezones are represented as offset from UTC, e.g. +05:00. - """ - - def _serialize_zoned_datetime(v: dt.datetime) -> str: - if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname(None): - # UTC is a special case where we use "Z" at the end instead of "+00:00" - return v.isoformat().replace("+00:00", "Z") - else: - # Delegate to the typical +/- offset format - return v.isoformat() - - if v.tzinfo is not None: - return _serialize_zoned_datetime(v) - else: - local_tz = dt.datetime.now().astimezone().tzinfo - localized_dt = v.replace(tzinfo=local_tz) - return _serialize_zoned_datetime(localized_dt) diff --git a/src/humanloop/core/file.py b/src/humanloop/core/file.py deleted file mode 100644 index 44b0d27c..00000000 --- a/src/humanloop/core/file.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
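The serialize_datetime helper above has three observable behaviours worth pinning down: UTC gets a "Z" suffix, other zones keep their numeric offset, and naive values adopt the local zone. A quick demonstration:

import datetime as dt

from humanloop.core.datetime_utils import serialize_datetime

# UTC renders with "Z" rather than "+00:00".
print(serialize_datetime(dt.datetime(2025, 5, 15, 12, 0, tzinfo=dt.timezone.utc)))
# -> 2025-05-15T12:00:00Z

# Non-UTC zones keep their numeric offset.
tz = dt.timezone(dt.timedelta(hours=5))
print(serialize_datetime(dt.datetime(2025, 5, 15, 12, 0, tzinfo=tz)))
# -> 2025-05-15T12:00:00+05:00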
- -from typing import IO, Dict, List, Mapping, Optional, Tuple, Union, cast - -# File typing inspired by the flexibility of types within the httpx library -# https://github.com/encode/httpx/blob/master/httpx/_types.py -FileContent = Union[IO[bytes], bytes, str] -File = Union[ - # file (or bytes) - FileContent, - # (filename, file (or bytes)) - Tuple[Optional[str], FileContent], - # (filename, file (or bytes), content_type) - Tuple[Optional[str], FileContent, Optional[str]], - # (filename, file (or bytes), content_type, headers) - Tuple[ - Optional[str], - FileContent, - Optional[str], - Mapping[str, str], - ], -] - - -def convert_file_dict_to_httpx_tuples( - d: Dict[str, Union[File, List[File]]], -) -> List[Tuple[str, File]]: - """ - The format we use is a list of tuples, where the first element is the - name of the file and the second is the file object. Typically HTTPX wants - a dict, but to be able to send lists of files, you have to use the list - approach (which also works for non-lists) - https://github.com/encode/httpx/pull/1032 - """ - - httpx_tuples = [] - for key, file_like in d.items(): - if isinstance(file_like, list): - for file_like_item in file_like: - httpx_tuples.append((key, file_like_item)) - else: - httpx_tuples.append((key, file_like)) - return httpx_tuples - - -def with_content_type(*, file: File, default_content_type: str) -> File: - """ - This function resolves to the file's content type, if provided, and defaults - to the default_content_type value if not. - """ - if isinstance(file, tuple): - if len(file) == 2: - filename, content = cast(Tuple[Optional[str], FileContent], file)  # type: ignore - return (filename, content, default_content_type) - elif len(file) == 3: - filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file)  # type: ignore - out_content_type = file_content_type or default_content_type - return (filename, content, out_content_type) - elif len(file) == 4: - filename, content, file_content_type, headers = cast(  # type: ignore - Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], file - ) - out_content_type = file_content_type or default_content_type - return (filename, content, out_content_type, headers) - else: - raise ValueError(f"Unexpected tuple length: {len(file)}") - return (None, file, default_content_type) diff --git a/src/humanloop/core/http_client.py b/src/humanloop/core/http_client.py deleted file mode 100644 index e7bd4f79..00000000 --- a/src/humanloop/core/http_client.py +++ /dev/null @@ -1,497 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import asyncio -import email.utils -import re -import time -import typing -import urllib.parse -from contextlib import asynccontextmanager, contextmanager -from random import random - -import httpx -from .file import File, convert_file_dict_to_httpx_tuples -from .jsonable_encoder import jsonable_encoder -from .query_encoder import encode_query -from .remove_none_from_dict import remove_none_from_dict -from .request_options import RequestOptions - -INITIAL_RETRY_DELAY_SECONDS = 0.5 -MAX_RETRY_DELAY_SECONDS = 10 -MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30 - - -def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]: - """ - This function parses the `Retry-After` header in an HTTP response and returns the number of seconds to wait. - - Inspired by the urllib3 retry implementation.
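- - The vendor-specific `retry-after-ms` header, when present, takes precedence over the standard `Retry-After` header, which may hold either a number of seconds or an HTTP date.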
- """ - retry_after_ms = response_headers.get("retry-after-ms") - if retry_after_ms is not None: - try: - return int(retry_after_ms) / 1000 if retry_after_ms > 0 else 0 - except Exception: - pass - - retry_after = response_headers.get("retry-after") - if retry_after is None: - return None - - # Attempt to parse the header as an int. - if re.match(r"^\s*[0-9]+\s*$", retry_after): - seconds = float(retry_after) - # Fallback to parsing it as a date. - else: - retry_date_tuple = email.utils.parsedate_tz(retry_after) - if retry_date_tuple is None: - return None - if retry_date_tuple[9] is None: # Python 2 - # Assume UTC if no timezone was specified - # On Python2.7, parsedate_tz returns None for a timezone offset - # instead of 0 if no timezone is given, where mktime_tz treats - # a None timezone offset as local time. - retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:] - - retry_date = email.utils.mktime_tz(retry_date_tuple) - seconds = retry_date - time.time() - - if seconds < 0: - seconds = 0 - - return seconds - - -def _retry_timeout(response: httpx.Response, retries: int) -> float: - """ - Determine the amount of time to wait before retrying a request. - This function begins by trying to parse a retry-after header from the response, and then proceeds to use exponential backoff - with a jitter to determine the number of seconds to wait. - """ - - # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. - retry_after = _parse_retry_after(response.headers) - if retry_after is not None and retry_after <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER: - return retry_after - - # Apply exponential backoff, capped at MAX_RETRY_DELAY_SECONDS. - retry_delay = min(INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS) - - # Add a randomness / jitter to the retry delay to avoid overwhelming the server with retries. 
- timeout = retry_delay * (1 - 0.25 * random()) - return timeout if timeout >= 0 else 0 - - -def _should_retry(response: httpx.Response) -> bool: - retryable_400s = [429, 408, 409] - return response.status_code >= 500 or response.status_code in retryable_400s - - -def remove_omit_from_dict( - original: typing.Dict[str, typing.Optional[typing.Any]], - omit: typing.Optional[typing.Any], -) -> typing.Dict[str, typing.Any]: - if omit is None: - return original - new: typing.Dict[str, typing.Any] = {} - for key, value in original.items(): - if value is not omit: - new[key] = value - return new - - -def maybe_filter_request_body( - data: typing.Optional[typing.Any], - request_options: typing.Optional[RequestOptions], - omit: typing.Optional[typing.Any], -) -> typing.Optional[typing.Any]: - if data is None: - return ( - jsonable_encoder(request_options.get("additional_body_parameters", {})) or {} - if request_options is not None - else None - ) - elif not isinstance(data, typing.Mapping): - data_content = jsonable_encoder(data) - else: - data_content = { - **(jsonable_encoder(remove_omit_from_dict(data, omit))), # type: ignore - **( - jsonable_encoder(request_options.get("additional_body_parameters", {})) or {} - if request_options is not None - else {} - ), - } - return data_content - - -# Abstracted out for testing purposes -def get_request_body( - *, - json: typing.Optional[typing.Any], - data: typing.Optional[typing.Any], - request_options: typing.Optional[RequestOptions], - omit: typing.Optional[typing.Any], -) -> typing.Tuple[typing.Optional[typing.Any], typing.Optional[typing.Any]]: - json_body = None - data_body = None - if data is not None: - data_body = maybe_filter_request_body(data, request_options, omit) - else: - # If both data and json are None, we send json data in the event extra properties are specified - json_body = maybe_filter_request_body(json, request_options, omit) - - # If you have an empty JSON body, you should just send None - return (json_body if json_body != {} else None), data_body if data_body != {} else None - - -class HttpClient: - def __init__( - self, - *, - httpx_client: httpx.Client, - base_timeout: typing.Callable[[], typing.Optional[float]], - base_headers: typing.Callable[[], typing.Dict[str, str]], - base_url: typing.Optional[typing.Callable[[], str]] = None, - ): - self.base_url = base_url - self.base_timeout = base_timeout - self.base_headers = base_headers - self.httpx_client = httpx_client - - def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: - base_url = maybe_base_url - if self.base_url is not None and base_url is None: - base_url = self.base_url() - - if base_url is None: - raise ValueError("A base_url is required to make this request, please provide one and try again.") - return base_url - - def request( - self, - path: typing.Optional[str] = None, - *, - method: str, - base_url: typing.Optional[str] = None, - params: typing.Optional[typing.Dict[str, typing.Any]] = None, - json: typing.Optional[typing.Any] = None, - data: typing.Optional[typing.Any] = None, - content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, - files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, - headers: typing.Optional[typing.Dict[str, typing.Any]] = None, - request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, - omit: typing.Optional[typing.Any] = None, - ) -> httpx.Response: - base_url = self.get_base_url(base_url) - timeout = ( 
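- # A per-request timeout_in_seconds in request_options takes precedence here; - # otherwise the client-level base_timeout set at construction applies.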
- request_options.get("timeout_in_seconds") - if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout() - ) - - json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) - - response = self.httpx_client.request( - method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), - } - ) - ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) or {} - if request_options is not None - else {} - ), - }, - omit, - ) - ) - ) - ), - json=json_body, - data=data_body, - content=content, - files=( - convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) - if (files is not None and files is not omit) - else None - ), - timeout=timeout, - ) - - max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0 - if _should_retry(response=response): - if max_retries > retries: - time.sleep(_retry_timeout(response=response, retries=retries)) - return self.request( - path=path, - method=method, - base_url=base_url, - params=params, - json=json, - content=content, - files=files, - headers=headers, - request_options=request_options, - retries=retries + 1, - omit=omit, - ) - - return response - - @contextmanager - def stream( - self, - path: typing.Optional[str] = None, - *, - method: str, - base_url: typing.Optional[str] = None, - params: typing.Optional[typing.Dict[str, typing.Any]] = None, - json: typing.Optional[typing.Any] = None, - data: typing.Optional[typing.Any] = None, - content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, - files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, - headers: typing.Optional[typing.Dict[str, typing.Any]] = None, - request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, - omit: typing.Optional[typing.Any] = None, - ) -> typing.Iterator[httpx.Response]: - base_url = self.get_base_url(base_url) - timeout = ( - request_options.get("timeout_in_seconds") - if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout() - ) - - json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) - - with self.httpx_client.stream( - method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) if request_options is not None else {}), - } - ) - ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) - if request_options is not None - else {} - ), - }, - omit, - ) - ) - ) - ), - json=json_body, - data=data_body, - content=content, - files=( - convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) - if (files is not None and files is not omit) - else None - ), - 
timeout=timeout, - ) as stream: - yield stream - - -class AsyncHttpClient: - def __init__( - self, - *, - httpx_client: httpx.AsyncClient, - base_timeout: typing.Callable[[], typing.Optional[float]], - base_headers: typing.Callable[[], typing.Dict[str, str]], - base_url: typing.Optional[typing.Callable[[], str]] = None, - ): - self.base_url = base_url - self.base_timeout = base_timeout - self.base_headers = base_headers - self.httpx_client = httpx_client - - def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: - base_url = maybe_base_url - if self.base_url is not None and base_url is None: - base_url = self.base_url() - - if base_url is None: - raise ValueError("A base_url is required to make this request, please provide one and try again.") - return base_url - - async def request( - self, - path: typing.Optional[str] = None, - *, - method: str, - base_url: typing.Optional[str] = None, - params: typing.Optional[typing.Dict[str, typing.Any]] = None, - json: typing.Optional[typing.Any] = None, - data: typing.Optional[typing.Any] = None, - content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, - files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, - headers: typing.Optional[typing.Dict[str, typing.Any]] = None, - request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, - omit: typing.Optional[typing.Any] = None, - ) -> httpx.Response: - base_url = self.get_base_url(base_url) - timeout = ( - request_options.get("timeout_in_seconds") - if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout() - ) - - json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) - - # Add the input to each of these and do None-safety checks - response = await self.httpx_client.request( - method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), - } - ) - ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) or {} - if request_options is not None - else {} - ), - }, - omit, - ) - ) - ) - ), - json=json_body, - data=data_body, - content=content, - files=( - convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) - if files is not None - else None - ), - timeout=timeout, - ) - - max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0 - if _should_retry(response=response): - if max_retries > retries: - await asyncio.sleep(_retry_timeout(response=response, retries=retries)) - return await self.request( - path=path, - method=method, - base_url=base_url, - params=params, - json=json, - content=content, - files=files, - headers=headers, - request_options=request_options, - retries=retries + 1, - omit=omit, - ) - return response - - @asynccontextmanager - async def stream( - self, - path: typing.Optional[str] = None, - *, - method: str, - base_url: typing.Optional[str] = None, - params: typing.Optional[typing.Dict[str, typing.Any]] = None, - json: typing.Optional[typing.Any] = None, - data: typing.Optional[typing.Any] = None, - 
content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, - files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, - headers: typing.Optional[typing.Dict[str, typing.Any]] = None, - request_options: typing.Optional[RequestOptions] = None, - retries: int = 2, - omit: typing.Optional[typing.Any] = None, - ) -> typing.AsyncIterator[httpx.Response]: - base_url = self.get_base_url(base_url) - timeout = ( - request_options.get("timeout_in_seconds") - if request_options is not None and request_options.get("timeout_in_seconds") is not None - else self.base_timeout() - ) - - json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) - - async with self.httpx_client.stream( - method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **(request_options.get("additional_headers", {}) if request_options is not None else {}), - } - ) - ), - params=encode_query( - jsonable_encoder( - remove_none_from_dict( - remove_omit_from_dict( - { - **(params if params is not None else {}), - **( - request_options.get("additional_query_parameters", {}) - if request_options is not None - else {} - ), - }, - omit=omit, - ) - ) - ) - ), - json=json_body, - data=data_body, - content=content, - files=( - convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) - if files is not None - else None - ), - timeout=timeout, - ) as stream: - yield stream diff --git a/src/humanloop/core/http_response.py b/src/humanloop/core/http_response.py deleted file mode 100644 index 48a1798a..00000000 --- a/src/humanloop/core/http_response.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from typing import Dict, Generic, TypeVar - -import httpx - -T = TypeVar("T") -"""Generic to represent the underlying type of the data wrapped by the HTTP response.""" - - -class BaseHttpResponse: - """Minimalist HTTP response wrapper that exposes response headers.""" - - _response: httpx.Response - - def __init__(self, response: httpx.Response): - self._response = response - - @property - def headers(self) -> Dict[str, str]: - return dict(self._response.headers) - - -class HttpResponse(Generic[T], BaseHttpResponse): - """HTTP response wrapper that exposes response headers and data.""" - - _data: T - - def __init__(self, response: httpx.Response, data: T): - super().__init__(response) - self._data = data - - @property - def data(self) -> T: - return self._data - - def close(self) -> None: - self._response.close() - - -class AsyncHttpResponse(Generic[T], BaseHttpResponse): - """HTTP response wrapper that exposes response headers and data.""" - - _data: T - - def __init__(self, response: httpx.Response, data: T): - super().__init__(response) - self._data = data - - @property - def data(self) -> T: - return self._data - - async def close(self) -> None: - await self._response.aclose() diff --git a/src/humanloop/core/jsonable_encoder.py b/src/humanloop/core/jsonable_encoder.py deleted file mode 100644 index afee3662..00000000 --- a/src/humanloop/core/jsonable_encoder.py +++ /dev/null @@ -1,100 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -""" -jsonable_encoder converts a Python object to a JSON-friendly dict -(e.g. datetimes to strings, Pydantic models to dicts). 
-
-Taken from FastAPI, and made a bit simpler
-https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py
-"""
-
-import base64
-import dataclasses
-import datetime as dt
-from enum import Enum
-from pathlib import PurePath
-from types import GeneratorType
-from typing import Any, Callable, Dict, List, Optional, Set, Union
-
-import pydantic
-from .datetime_utils import serialize_datetime
-from .pydantic_utilities import (
-    IS_PYDANTIC_V2,
-    encode_by_type,
-    to_jsonable_with_fallback,
-)
-
-SetIntStr = Set[Union[int, str]]
-DictIntStrAny = Dict[Union[int, str], Any]
-
-
-def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:
-    custom_encoder = custom_encoder or {}
-    if custom_encoder:
-        if type(obj) in custom_encoder:
-            return custom_encoder[type(obj)](obj)
-        else:
-            for encoder_type, encoder_instance in custom_encoder.items():
-                if isinstance(obj, encoder_type):
-                    return encoder_instance(obj)
-    if isinstance(obj, pydantic.BaseModel):
-        if IS_PYDANTIC_V2:
-            encoder = getattr(obj.model_config, "json_encoders", {})  # type: ignore  # Pydantic v2
-        else:
-            encoder = getattr(obj.__config__, "json_encoders", {})  # type: ignore  # Pydantic v1
-        if custom_encoder:
-            encoder.update(custom_encoder)
-        obj_dict = obj.dict(by_alias=True)
-        if "__root__" in obj_dict:
-            obj_dict = obj_dict["__root__"]
-        if "root" in obj_dict:
-            obj_dict = obj_dict["root"]
-        return jsonable_encoder(obj_dict, custom_encoder=encoder)
-    if dataclasses.is_dataclass(obj):
-        obj_dict = dataclasses.asdict(obj)  # type: ignore
-        return jsonable_encoder(obj_dict, custom_encoder=custom_encoder)
-    if isinstance(obj, bytes):
-        return base64.b64encode(obj).decode("utf-8")
-    if isinstance(obj, Enum):
-        return obj.value
-    if isinstance(obj, PurePath):
-        return str(obj)
-    if isinstance(obj, (str, int, float, type(None))):
-        return obj
-    if isinstance(obj, dt.datetime):
-        return serialize_datetime(obj)
-    if isinstance(obj, dt.date):
-        return str(obj)
-    if isinstance(obj, dict):
-        encoded_dict = {}
-        allowed_keys = set(obj.keys())
-        for key, value in obj.items():
-            if key in allowed_keys:
-                encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder)
-                encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder)
-                encoded_dict[encoded_key] = encoded_value
-        return encoded_dict
-    if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
-        encoded_list = []
-        for item in obj:
-            encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder))
-        return encoded_list
-
-    def fallback_serializer(o: Any) -> Any:
-        attempt_encode = encode_by_type(o)
-        if attempt_encode is not None:
-            return attempt_encode
-
-        try:
-            data = dict(o)
-        except Exception as e:
-            errors: List[Exception] = []
-            errors.append(e)
-            try:
-                data = vars(o)
-            except Exception as e:
-                errors.append(e)
-                raise ValueError(errors) from e
-        return jsonable_encoder(data, custom_encoder=custom_encoder)
-
-    return to_jsonable_with_fallback(obj, fallback_serializer)
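For context on the file above: `jsonable_encoder` normalizes arbitrary Python values into JSON-friendly structures before they are sent over the wire. A minimal sketch of the observable behavior, assuming the `humanloop.core.jsonable_encoder` import path that this diff deletes (i.e., the pre-0.8.39 layout):

import datetime as dt
from enum import Enum

from humanloop.core.jsonable_encoder import jsonable_encoder


class Status(Enum):
    OK = "ok"


# Per the branches above: datetimes go through serialize_datetime, Enums
# collapse to .value, bytes become base64 text, and tuples/sets/generators
# come back as plain lists.
print(
    jsonable_encoder(
        {
            "when": dt.datetime(2025, 5, 15, 15, 39, 33),
            "status": Status.OK,
            "blob": b"raw-bytes",
            "tags": ("a", "b"),
        }
    )
)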
diff --git a/src/humanloop/core/pagination.py b/src/humanloop/core/pagination.py
deleted file mode 100644
index 209a1ff1..00000000
--- a/src/humanloop/core/pagination.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import AsyncIterator, Awaitable, Callable, Generic, Iterator, List, Optional, TypeVar
-
-from .http_response import BaseHttpResponse
-
-T = TypeVar("T")
-"""Generic to represent the underlying type of the results within a page"""
-
-
-# SDKs implement a Page ABC per pagination request; the endpoint then returns a pager that wraps this type.
-# For example, an endpoint will return SyncPager[UserPage] where UserPage implements the Page ABC. ex:
-#
-# SyncPager(
-#     has_next=response.list_metadata.after is not None,
-#     items=response.data,
-#     # This should be the outer function that returns the SyncPager again
-#     get_next=lambda: list(..., cursor: response.cursor) (or list(..., offset: offset + 1))
-# )
-
-
-@dataclass(frozen=True)
-class SyncPager(Generic[T]):
-    get_next: Optional[Callable[[], Optional[SyncPager[T]]]]
-    has_next: bool
-    items: Optional[List[T]]
-    response: Optional[BaseHttpResponse]
-
-    # Here we type ignore the iterator to avoid a mypy error
-    # caused by the type conflict with Pydantic's __iter__ method
-    # brought in by extending the base model
-    def __iter__(self) -> Iterator[T]:  # type: ignore[override]
-        for page in self.iter_pages():
-            if page.items is not None:
-                yield from page.items
-
-    def iter_pages(self) -> Iterator[SyncPager[T]]:
-        page: Optional[SyncPager[T]] = self
-        while page is not None:
-            yield page
-
-            if not page.has_next or page.get_next is None:
-                return
-
-            page = page.get_next()
-            if page is None or page.items is None or len(page.items) == 0:
-                return
-
-    def next_page(self) -> Optional[SyncPager[T]]:
-        return self.get_next() if self.get_next is not None else None
-
-
-@dataclass(frozen=True)
-class AsyncPager(Generic[T]):
-    get_next: Optional[Callable[[], Awaitable[Optional[AsyncPager[T]]]]]
-    has_next: bool
-    items: Optional[List[T]]
-    response: Optional[BaseHttpResponse]
-
-    async def __aiter__(self) -> AsyncIterator[T]:
-        async for page in self.iter_pages():
-            if page.items is not None:
-                for item in page.items:
-                    yield item
-
-    async def iter_pages(self) -> AsyncIterator[AsyncPager[T]]:
-        page: Optional[AsyncPager[T]] = self
-        while page is not None:
-            yield page
-
-            if not page.has_next or page.get_next is None:
-                return
-
-            page = await page.get_next()
-            if page is None or page.items is None or len(page.items) == 0:
-                return
-
-    async def next_page(self) -> Optional[AsyncPager[T]]:
-        return await self.get_next() if self.get_next is not None else None
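The pagers above are what list endpoints return (e.g. `SyncPager[DatasetResponse]` from `datasets.list`, shown later in this diff). A sketch of the two consumption styles, following the Examples embedded in the client docstrings; the `dataset.id` attribute is illustrative:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Flat iteration: __iter__ drives iter_pages() and yields every item.
for dataset in client.datasets.list(size=50):
    print(dataset.id)

# Page-by-page iteration: stops once has_next is False, get_next is None,
# or the next page comes back empty.
for page in client.datasets.list(size=50).iter_pages():
    print(len(page.items or []))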
diff --git a/src/humanloop/core/pydantic_utilities.py b/src/humanloop/core/pydantic_utilities.py
deleted file mode 100644
index 0360ef49..00000000
--- a/src/humanloop/core/pydantic_utilities.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-# nopycln: file
-import datetime as dt
-from collections import defaultdict
-from typing import Any, Callable, ClassVar, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast
-
-import pydantic
-
-IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
-
-if IS_PYDANTIC_V2:
-    from pydantic.v1.datetime_parse import parse_date as parse_date
-    from pydantic.v1.datetime_parse import parse_datetime as parse_datetime
-    from pydantic.v1.fields import ModelField as ModelField
-    from pydantic.v1.json import ENCODERS_BY_TYPE as encoders_by_type  # type: ignore[attr-defined]
-    from pydantic.v1.typing import get_args as get_args
-    from pydantic.v1.typing import get_origin as get_origin
-    from pydantic.v1.typing import is_literal_type as is_literal_type
-    from pydantic.v1.typing import is_union as is_union
-else:
-    from pydantic.datetime_parse import parse_date as parse_date  # type: ignore[no-redef]
-    from pydantic.datetime_parse import parse_datetime as parse_datetime  # type: ignore[no-redef]
-    from pydantic.fields import ModelField as ModelField  # type: ignore[attr-defined, no-redef]
-    from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type  # type: ignore[no-redef]
-    from pydantic.typing import get_args as get_args  # type: ignore[no-redef]
-    from pydantic.typing import get_origin as get_origin  # type: ignore[no-redef]
-    from pydantic.typing import is_literal_type as is_literal_type  # type: ignore[no-redef]
-    from pydantic.typing import is_union as is_union  # type: ignore[no-redef]
-
-from .datetime_utils import serialize_datetime
-from .serialization import convert_and_respect_annotation_metadata
-from typing_extensions import TypeAlias
-
-T = TypeVar("T")
-Model = TypeVar("Model", bound=pydantic.BaseModel)
-
-
-def parse_obj_as(type_: Type[T], object_: Any) -> T:
-    dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
-    if IS_PYDANTIC_V2:
-        adapter = pydantic.TypeAdapter(type_)  # type: ignore[attr-defined]
-        return adapter.validate_python(dealiased_object)
-    return pydantic.parse_obj_as(type_, dealiased_object)
-
-
-def to_jsonable_with_fallback(obj: Any, fallback_serializer: Callable[[Any], Any]) -> Any:
-    if IS_PYDANTIC_V2:
-        from pydantic_core import to_jsonable_python
-
-        return to_jsonable_python(obj, fallback=fallback_serializer)
-    return fallback_serializer(obj)
-
-
-class UniversalBaseModel(pydantic.BaseModel):
-    if IS_PYDANTIC_V2:
-        model_config: ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(  # type: ignore[typeddict-unknown-key]
-            # Allow fields beginning with `model_` to be used in the model
-            protected_namespaces=(),
-        )
-
-        @pydantic.model_serializer(mode="wrap", when_used="json")  # type: ignore[attr-defined]
-        def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> Any:  # type: ignore[name-defined]
-            serialized = handler(self)
-            data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
-            return data
-
-    else:
-
-        class Config:
-            smart_union = True
-            json_encoders = {dt.datetime: serialize_datetime}
-
-    @classmethod
-    def model_construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
-        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
-        return cls.construct(_fields_set, **dealiased_object)
-
-    @classmethod
-    def construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
-        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
-        if IS_PYDANTIC_V2:
-            return super().model_construct(_fields_set, **dealiased_object)  # type: ignore[misc]
-        return super().construct(_fields_set, **dealiased_object)
-
-    def json(self, **kwargs: Any) -> str:
-        kwargs_with_defaults = {
-            "by_alias": True,
-            "exclude_unset": True,
-            **kwargs,
-        }
-        if IS_PYDANTIC_V2:
-            return super().model_dump_json(**kwargs_with_defaults)  # type: ignore[misc]
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: Any) -> Dict[str, Any]:
-        """
-        Override the default dict method to `exclude_unset` by default. This function patches
-        `exclude_unset` so that fields with non-None default values are still included.
-        """
-        # Note: the logic here is multiplexed given the levers exposed in Pydantic V1 vs V2.
-        # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice.
-        #
-        # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
-        # that we have less control over, and this is less intrusive than custom serializers for now.
-        if IS_PYDANTIC_V2:
-            kwargs_with_defaults_exclude_unset = {
-                **kwargs,
-                "by_alias": True,
-                "exclude_unset": True,
-                "exclude_none": False,
-            }
-            kwargs_with_defaults_exclude_none = {
-                **kwargs,
-                "by_alias": True,
-                "exclude_none": True,
-                "exclude_unset": False,
-            }
-            dict_dump = deep_union_pydantic_dicts(
-                super().model_dump(**kwargs_with_defaults_exclude_unset),  # type: ignore[misc]
-                super().model_dump(**kwargs_with_defaults_exclude_none),  # type: ignore[misc]
-            )
-
-        else:
-            _fields_set = self.__fields_set__.copy()
-
-            fields = _get_model_fields(self.__class__)
-            for name, field in fields.items():
-                if name not in _fields_set:
-                    default = _get_field_default(field)
-
-                    # If the default values are non-null, act like they've been set.
-                    # This effectively allows exclude_unset to work like exclude_none, where
-                    # the latter passes through intentionally set None values.
-                    if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]):
-                        _fields_set.add(name)
-
-                        if default is not None:
-                            self.__fields_set__.add(name)
-
-            kwargs_with_defaults_exclude_unset_include_fields = {
-                "by_alias": True,
-                "exclude_unset": True,
-                "include": _fields_set,
-                **kwargs,
-            }
-
-            dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)
-
-        return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")
-
-
-def _union_list_of_pydantic_dicts(source: List[Any], destination: List[Any]) -> List[Any]:
-    converted_list: List[Any] = []
-    for i, item in enumerate(source):
-        destination_value = destination[i]
-        if isinstance(item, dict):
-            converted_list.append(deep_union_pydantic_dicts(item, destination_value))
-        elif isinstance(item, list):
-            converted_list.append(_union_list_of_pydantic_dicts(item, destination_value))
-        else:
-            converted_list.append(item)
-    return converted_list
-
-
-def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[str, Any]) -> Dict[str, Any]:
-    for key, value in source.items():
-        node = destination.setdefault(key, {})
-        if isinstance(value, dict):
-            deep_union_pydantic_dicts(value, node)
-        # Note: we do not do this same processing for sets, given we do not have sets of models
-        # and, given sets are unordered, the processing of the set and matching objects would
-        # be non-trivial.
-        elif isinstance(value, list):
-            destination[key] = _union_list_of_pydantic_dicts(value, node)
-        else:
-            destination[key] = value
-
-    return destination
-
-
-if IS_PYDANTIC_V2:
-
-    class V2RootModel(UniversalBaseModel, pydantic.RootModel):  # type: ignore[misc, name-defined, type-arg]
-        pass
-
-    UniversalRootModel: TypeAlias = V2RootModel  # type: ignore[misc]
-else:
-    UniversalRootModel: TypeAlias = UniversalBaseModel  # type: ignore[misc, no-redef]
-
-
-def encode_by_type(o: Any) -> Any:
-    encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(tuple)
-    for type_, encoder in encoders_by_type.items():
-        encoders_by_class_tuples[encoder] += (type_,)
-
-    if type(o) in encoders_by_type:
-        return encoders_by_type[type(o)](o)
-    for encoder, classes_tuple in encoders_by_class_tuples.items():
-        if isinstance(o, classes_tuple):
-            return encoder(o)
-
-
-def update_forward_refs(model: Type["Model"], **localns: Any) -> None:
-    if IS_PYDANTIC_V2:
-        model.model_rebuild(raise_errors=False)  # type: ignore[attr-defined]
-    else:
-        model.update_forward_refs(**localns)
-
-
-# Mirrors Pydantic's internal typing
-AnyCallable = Callable[..., Any]
-
-
-def universal_root_validator(
-    pre: bool = False,
-) -> Callable[[AnyCallable], AnyCallable]:
-    def decorator(func: AnyCallable) -> AnyCallable:
-        if IS_PYDANTIC_V2:
-            return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func))  # type: ignore[attr-defined]
-        return cast(AnyCallable, pydantic.root_validator(pre=pre)(func))  # type: ignore[call-overload]
-
-    return decorator
-
-
-def universal_field_validator(field_name: str, pre: bool = False) -> Callable[[AnyCallable], AnyCallable]:
-    def decorator(func: AnyCallable) -> AnyCallable:
-        if IS_PYDANTIC_V2:
-            return cast(AnyCallable, pydantic.field_validator(field_name, mode="before" if pre else "after")(func))  # type: ignore[attr-defined]
-        return cast(AnyCallable, pydantic.validator(field_name, pre=pre)(func))
-
-    return decorator
-
-
-PydanticField = Union[ModelField, pydantic.fields.FieldInfo]
-
-
-def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]:
-    if IS_PYDANTIC_V2:
-        return cast(Mapping[str, PydanticField], model.model_fields)  # type: ignore[attr-defined]
-    return cast(Mapping[str, PydanticField], model.__fields__)
-
-
-def _get_field_default(field: PydanticField) -> Any:
-    try:
-        value = field.get_default()  # type: ignore[union-attr]
-    except:
-        value = field.default
-    if IS_PYDANTIC_V2:
-        from pydantic_core import PydanticUndefined
-
-        if value == PydanticUndefined:
-            return None
-        return value
-    return value
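The query encoder in the next file flattens nested mappings into bracketed keys and repeats the key for lists of scalars. A small sketch of the expected output, assuming the `humanloop.core.query_encoder` import path shown in the header below:

from humanloop.core.query_encoder import encode_query

# Nested dicts flatten to "key[subkey][subkey2]" pairs; lists repeat the key.
pairs = encode_query({"filter": {"user": {"name": "ada"}}, "ids": [1, 2]})
print(pairs)  # [("filter[user][name]", "ada"), ("ids", 1), ("ids", 2)]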
diff --git a/src/humanloop/core/query_encoder.py b/src/humanloop/core/query_encoder.py
deleted file mode 100644
index 3183001d..00000000
--- a/src/humanloop/core/query_encoder.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from typing import Any, Dict, List, Optional, Tuple
-
-import pydantic
-
-
-# Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict
-def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> List[Tuple[str, Any]]:
-    result = []
-    for k, v in dict_flat.items():
-        key = f"{key_prefix}[{k}]" if key_prefix is not None else k
-        if isinstance(v, dict):
-            result.extend(traverse_query_dict(v, key))
-        elif isinstance(v, list):
-            for arr_v in v:
-                if isinstance(arr_v, dict):
-                    result.extend(traverse_query_dict(arr_v, key))
-                else:
-                    result.append((key, arr_v))
-        else:
-            result.append((key, v))
-    return result
-
-
-def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]:
-    if isinstance(query_value, pydantic.BaseModel) or isinstance(query_value, dict):
-        if isinstance(query_value, pydantic.BaseModel):
-            obj_dict = query_value.dict(by_alias=True)
-        else:
-            obj_dict = query_value
-        return traverse_query_dict(obj_dict, query_key)
-    elif isinstance(query_value, list):
-        encoded_values: List[Tuple[str, Any]] = []
-        for value in query_value:
-            if isinstance(value, pydantic.BaseModel) or isinstance(value, dict):
-                if isinstance(value, pydantic.BaseModel):
-                    obj_dict = value.dict(by_alias=True)
-                elif isinstance(value, dict):
-                    obj_dict = value
-
-                encoded_values.extend(single_query_encoder(query_key, obj_dict))
-            else:
-                encoded_values.append((query_key, value))
-
-        return encoded_values
-
-    return [(query_key, query_value)]
-
-
-def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]:
-    if query is None:
-        return None
-
-    encoded_query = []
-    for k, v in query.items():
-        encoded_query.extend(single_query_encoder(k, v))
-    return encoded_query
diff --git a/src/humanloop/core/remove_none_from_dict.py b/src/humanloop/core/remove_none_from_dict.py
deleted file mode 100644
index c2298143..00000000
--- a/src/humanloop/core/remove_none_from_dict.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from typing import Any, Dict, Mapping, Optional
-
-
-def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]:
-    new: Dict[str, Any] = {}
-    for key, value in original.items():
-        if value is not None:
-            new[key] = value
-    return new
diff --git a/src/humanloop/core/request_options.py b/src/humanloop/core/request_options.py
deleted file mode 100644
index 1b388044..00000000
--- a/src/humanloop/core/request_options.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-try:
-    from typing import NotRequired  # type: ignore
-except ImportError:
-    from typing_extensions import NotRequired
-
-
-class RequestOptions(typing.TypedDict, total=False):
-    """
-    Additional options for request-specific configuration when calling APIs via the SDK.
-    This is used primarily as an optional final parameter for service functions.
-
-    Attributes:
-        - timeout_in_seconds: int. The number of seconds to await an API call before timing out.
-
-        - max_retries: int. The max number of retries to attempt if the API call fails.
-
-        - additional_headers: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's header dict
-
-        - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict
-
-        - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict
-
-        - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads.
-    """
-
-    timeout_in_seconds: NotRequired[int]
-    max_retries: NotRequired[int]
-    additional_headers: NotRequired[typing.Dict[str, typing.Any]]
-    additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]]
-    additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]]
-    chunk_size: NotRequired[int]
diff --git a/src/humanloop/core/serialization.py b/src/humanloop/core/serialization.py
deleted file mode 100644
index c36e865c..00000000
--- a/src/humanloop/core/serialization.py
+++ /dev/null
@@ -1,276 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import collections
-import inspect
-import typing
-
-import pydantic
-import typing_extensions
-
-
-class FieldMetadata:
-    """
-    Metadata class used to annotate fields to provide additional information.
-
-    Example:
-        class MyDict(TypedDict):
-            field: typing.Annotated[str, FieldMetadata(alias="field_name")]
-
-    Will serialize: `{"field": "value"}`
-    To: `{"field_name": "value"}`
-    """
-
-    alias: str
-
-    def __init__(self, *, alias: str) -> None:
-        self.alias = alias
-
-
-def convert_and_respect_annotation_metadata(
-    *,
-    object_: typing.Any,
-    annotation: typing.Any,
-    inner_type: typing.Optional[typing.Any] = None,
-    direction: typing.Literal["read", "write"],
-) -> typing.Any:
-    """
-    Respect the metadata annotations on a field, such as aliasing. This function effectively
-    manipulates the dict-form of an object to respect the metadata annotations. This is primarily used for
-    TypedDicts, which cannot support aliasing out of the box, and can be extended for additional
-    utilities, such as defaults.
-
-    Parameters
-    ----------
-    object_ : typing.Any
-
-    annotation : type
-        The type we're looking to apply typing annotations from
-
-    inner_type : typing.Optional[type]
-
-    Returns
-    -------
-    typing.Any
-    """
-
-    if object_ is None:
-        return None
-    if inner_type is None:
-        inner_type = annotation
-
-    clean_type = _remove_annotations(inner_type)
-    # Pydantic models
-    if (
-        inspect.isclass(clean_type)
-        and issubclass(clean_type, pydantic.BaseModel)
-        and isinstance(object_, typing.Mapping)
-    ):
-        return _convert_mapping(object_, clean_type, direction)
-    # TypedDicts
-    if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping):
-        return _convert_mapping(object_, clean_type, direction)
-
-    if (
-        typing_extensions.get_origin(clean_type) == typing.Dict
-        or typing_extensions.get_origin(clean_type) == dict
-        or clean_type == typing.Dict
-    ) and isinstance(object_, typing.Dict):
-        key_type = typing_extensions.get_args(clean_type)[0]
-        value_type = typing_extensions.get_args(clean_type)[1]
-
-        return {
-            key: convert_and_respect_annotation_metadata(
-                object_=value,
-                annotation=annotation,
-                inner_type=value_type,
-                direction=direction,
-            )
-            for key, value in object_.items()
-        }
-
-    # If you're iterating on a string, do not bother to coerce it to a sequence.
-    if not isinstance(object_, str):
-        if (
-            typing_extensions.get_origin(clean_type) == typing.Set
-            or typing_extensions.get_origin(clean_type) == set
-            or clean_type == typing.Set
-        ) and isinstance(object_, typing.Set):
-            inner_type = typing_extensions.get_args(clean_type)[0]
-            return {
-                convert_and_respect_annotation_metadata(
-                    object_=item,
-                    annotation=annotation,
-                    inner_type=inner_type,
-                    direction=direction,
-                )
-                for item in object_
-            }
-        elif (
-            (
-                typing_extensions.get_origin(clean_type) == typing.List
-                or typing_extensions.get_origin(clean_type) == list
-                or clean_type == typing.List
-            )
-            and isinstance(object_, typing.List)
-        ) or (
-            (
-                typing_extensions.get_origin(clean_type) == typing.Sequence
-                or typing_extensions.get_origin(clean_type) == collections.abc.Sequence
-                or clean_type == typing.Sequence
-            )
-            and isinstance(object_, typing.Sequence)
-        ):
-            inner_type = typing_extensions.get_args(clean_type)[0]
-            return [
-                convert_and_respect_annotation_metadata(
-                    object_=item,
-                    annotation=annotation,
-                    inner_type=inner_type,
-                    direction=direction,
-                )
-                for item in object_
-            ]
-
-    if typing_extensions.get_origin(clean_type) == typing.Union:
-        # We should be able to ~relatively~ safely try to convert keys against all
-        # member types in the union; the edge case here is if one member aliases a field
-        # of the same name to a different name from another member,
-        # or if another member aliases a field of the same name that another member does not.
-        for member in typing_extensions.get_args(clean_type):
-            object_ = convert_and_respect_annotation_metadata(
-                object_=object_,
-                annotation=annotation,
-                inner_type=member,
-                direction=direction,
-            )
-        return object_
-
-    annotated_type = _get_annotation(annotation)
-    if annotated_type is None:
-        return object_
-
-    # If the object is not a TypedDict, a Union, or another container (list, set, sequence, etc.),
-    # then we can safely return it as is.
-    return object_
-
-
-def _convert_mapping(
-    object_: typing.Mapping[str, object],
-    expected_type: typing.Any,
-    direction: typing.Literal["read", "write"],
-) -> typing.Mapping[str, object]:
-    converted_object: typing.Dict[str, object] = {}
-    try:
-        annotations = typing_extensions.get_type_hints(expected_type, include_extras=True)
-    except NameError:
-        # The TypedDict contains a circular reference, so
-        # we use the __annotations__ attribute directly.
-        annotations = getattr(expected_type, "__annotations__", {})
-    aliases_to_field_names = _get_alias_to_field_name(annotations)
-    for key, value in object_.items():
-        if direction == "read" and key in aliases_to_field_names:
-            dealiased_key = aliases_to_field_names.get(key)
-            if dealiased_key is not None:
-                type_ = annotations.get(dealiased_key)
-        else:
-            type_ = annotations.get(key)
-        # Note: you can't get the annotation by the field name in read mode, so you must check the aliases map.
-        #
-        # So this is effectively saying: if we're in write mode and we don't have a type, or we're in read mode
-        # and we don't have an alias, then we can just pass the value through as is.
-        if type_ is None:
-            converted_object[key] = value
-        elif direction == "read" and key not in aliases_to_field_names:
-            converted_object[key] = convert_and_respect_annotation_metadata(
-                object_=value, annotation=type_, direction=direction
-            )
-        else:
-            converted_object[_alias_key(key, type_, direction, aliases_to_field_names)] = (
-                convert_and_respect_annotation_metadata(object_=value, annotation=type_, direction=direction)
-            )
-    return converted_object
-
-
-def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]:
-    maybe_annotated_type = typing_extensions.get_origin(type_)
-    if maybe_annotated_type is None:
-        return None
-
-    if maybe_annotated_type == typing_extensions.NotRequired:
-        type_ = typing_extensions.get_args(type_)[0]
-        maybe_annotated_type = typing_extensions.get_origin(type_)
-
-    if maybe_annotated_type == typing_extensions.Annotated:
-        return type_
-
-    return None
-
-
-def _remove_annotations(type_: typing.Any) -> typing.Any:
-    maybe_annotated_type = typing_extensions.get_origin(type_)
-    if maybe_annotated_type is None:
-        return type_
-
-    if maybe_annotated_type == typing_extensions.NotRequired:
-        return _remove_annotations(typing_extensions.get_args(type_)[0])
-
-    if maybe_annotated_type == typing_extensions.Annotated:
-        return _remove_annotations(typing_extensions.get_args(type_)[0])
-
-    return type_
-
-
-def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]:
-    annotations = typing_extensions.get_type_hints(type_, include_extras=True)
-    return _get_alias_to_field_name(annotations)
-
-
-def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]:
-    annotations = typing_extensions.get_type_hints(type_, include_extras=True)
-    return _get_field_to_alias_name(annotations)
-
-
-def _get_alias_to_field_name(
-    field_to_hint: typing.Dict[str, typing.Any],
-) -> typing.Dict[str, str]:
-    aliases = {}
-    for field, hint in field_to_hint.items():
-        maybe_alias = _get_alias_from_type(hint)
-        if maybe_alias is not None:
-            aliases[maybe_alias] = field
-    return aliases
-
-
-def _get_field_to_alias_name(
-    field_to_hint: typing.Dict[str, typing.Any],
-) -> typing.Dict[str, str]:
-    aliases = {}
-    for field, hint in field_to_hint.items():
-        maybe_alias = _get_alias_from_type(hint)
-        if maybe_alias is not None:
-            aliases[field] = maybe_alias
-    return aliases
-
-
-def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]:
-    maybe_annotated_type = _get_annotation(type_)
-
-    if maybe_annotated_type is not None:
-        # The actual annotations are 1 onward; the first is the annotated type
-        annotations = typing_extensions.get_args(maybe_annotated_type)[1:]
-
-        for annotation in annotations:
-            if isinstance(annotation, FieldMetadata) and annotation.alias is not None:
-                return annotation.alias
-    return None
-
-
-def _alias_key(
-    key: str,
-    type_: typing.Any,
-    direction: typing.Literal["read", "write"],
-    aliases_to_field_names: typing.Dict[str, str],
-) -> str:
-    if direction == "read":
-        return aliases_to_field_names.get(key, key)
-    return _get_alias_from_type(type_=type_) or key
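To make the aliasing machinery above concrete: `FieldMetadata` lets a TypedDict field travel under a wire name that differs from its Python name, as the class docstring describes. A minimal sketch; `MyParams` and `model_id`/`modelId` are made-up names for illustration:

import typing_extensions

from humanloop.core.serialization import FieldMetadata, convert_and_respect_annotation_metadata


class MyParams(typing_extensions.TypedDict):
    model_id: typing_extensions.Annotated[str, FieldMetadata(alias="modelId")]


# "write" renames Python field names to their aliases for the request body...
print(convert_and_respect_annotation_metadata(object_={"model_id": "m-123"}, annotation=MyParams, direction="write"))
# {'modelId': 'm-123'}

# ...and "read" maps wire names back to Python field names.
print(convert_and_respect_annotation_metadata(object_={"modelId": "m-123"}, annotation=MyParams, direction="read"))
# {'model_id': 'm-123'}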
diff --git a/src/humanloop/core/unchecked_base_model.py b/src/humanloop/core/unchecked_base_model.py
deleted file mode 100644
index 2c2d92a7..00000000
--- a/src/humanloop/core/unchecked_base_model.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import inspect
-import typing
-import uuid
-
-import pydantic
-import typing_extensions
-from .pydantic_utilities import (
-    IS_PYDANTIC_V2,
-    ModelField,
-    UniversalBaseModel,
-    get_args,
-    get_origin,
-    is_literal_type,
-    is_union,
-    parse_date,
-    parse_datetime,
-    parse_obj_as,
-)
-from .serialization import get_field_to_alias_mapping
-from pydantic_core import PydanticUndefined
-
-
-class UnionMetadata:
-    discriminant: str
-
-    def __init__(self, *, discriminant: str) -> None:
-        self.discriminant = discriminant
-
-
-Model = typing.TypeVar("Model", bound=pydantic.BaseModel)
-
-
-class UncheckedBaseModel(UniversalBaseModel):
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow")  # type: ignore  # Pydantic v2
-    else:
-
-        class Config:
-            extra = pydantic.Extra.allow
-
-    @classmethod
-    def model_construct(
-        cls: typing.Type["Model"],
-        _fields_set: typing.Optional[typing.Set[str]] = None,
-        **values: typing.Any,
-    ) -> "Model":
-        # Fall back to the `construct` override specified below.
-        return cls.construct(_fields_set=_fields_set, **values)
-
-    # Allow construct to not validate model
-    # Implementation taken from: https://github.com/pydantic/pydantic/issues/1168#issuecomment-817742836
-    @classmethod
-    def construct(
-        cls: typing.Type["Model"],
-        _fields_set: typing.Optional[typing.Set[str]] = None,
-        **values: typing.Any,
-    ) -> "Model":
-        m = cls.__new__(cls)
-        fields_values = {}
-
-        if _fields_set is None:
-            _fields_set = set(values.keys())
-
-        fields = _get_model_fields(cls)
-        populate_by_name = _get_is_populate_by_name(cls)
-        field_aliases = get_field_to_alias_mapping(cls)
-
-        for name, field in fields.items():
-            # The key here is only used to pull data from the values dict;
-            # you should always use the NAME of the field for fields_values, etc.,
-            # because that's how the object is constructed from a pydantic perspective.
-            key = field.alias
-            if (key is None or field.alias == name) and name in field_aliases:
-                key = field_aliases[name]
-
-            if key is None or (key not in values and populate_by_name):  # Added this to allow population by field name
-                key = name
-
-            if key in values:
-                if IS_PYDANTIC_V2:
-                    type_ = field.annotation  # type: ignore  # Pydantic v2
-                else:
-                    type_ = typing.cast(typing.Type, field.outer_type_)  # type: ignore  # Pydantic < v1.10.15
-
-                fields_values[name] = (
-                    construct_type(object_=values[key], type_=type_) if type_ is not None else values[key]
-                )
-                _fields_set.add(name)
-            else:
-                default = _get_field_default(field)
-                fields_values[name] = default
-
-                # If the default values are non-null, act like they've been set.
-                # This effectively allows exclude_unset to work like exclude_none, where
-                # the latter passes through intentionally set None values.
-                if default != None and default != PydanticUndefined:
-                    _fields_set.add(name)
-
-        # Add extras back in
-        extras = {}
-        pydantic_alias_fields = [field.alias for field in fields.values()]
-        internal_alias_fields = list(field_aliases.values())
-        for key, value in values.items():
-            # If the key is not a field by name, nor an alias to a field, then it's extra
-            if (key not in pydantic_alias_fields and key not in internal_alias_fields) and key not in fields:
-                if IS_PYDANTIC_V2:
-                    extras[key] = value
-                else:
-                    _fields_set.add(key)
-                    fields_values[key] = value
-
-        object.__setattr__(m, "__dict__", fields_values)
-
-        if IS_PYDANTIC_V2:
-            object.__setattr__(m, "__pydantic_private__", None)
-            object.__setattr__(m, "__pydantic_extra__", extras)
-            object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
-        else:
-            object.__setattr__(m, "__fields_set__", _fields_set)
-            m._init_private_attributes()  # type: ignore  # Pydantic v1
-        return m
-
-
-def _convert_undiscriminated_union_type(union_type: typing.Type[typing.Any], object_: typing.Any) -> typing.Any:
-    inner_types = get_args(union_type)
-    if typing.Any in inner_types:
-        return object_
-
-    for inner_type in inner_types:
-        try:
-            if inspect.isclass(inner_type) and issubclass(inner_type, pydantic.BaseModel):
-                # Attempt a validated parse until one works
-                return parse_obj_as(inner_type, object_)
-        except Exception:
-            continue
-
-    # If none of the types work, just return the first successful cast
-    for inner_type in inner_types:
-        try:
-            return construct_type(object_=object_, type_=inner_type)
-        except Exception:
-            continue
-
-
-def _convert_union_type(type_: typing.Type[typing.Any], object_: typing.Any) -> typing.Any:
-    base_type = get_origin(type_) or type_
-    union_type = type_
-    if base_type == typing_extensions.Annotated:
-        union_type = get_args(type_)[0]
-        annotated_metadata = get_args(type_)[1:]
-        for metadata in annotated_metadata:
-            if isinstance(metadata, UnionMetadata):
-                try:
-                    # Cast to the correct type, based on the discriminant
-                    for inner_type in get_args(union_type):
-                        try:
-                            objects_discriminant = getattr(object_, metadata.discriminant)
-                        except:
-                            objects_discriminant = object_[metadata.discriminant]
-                        if inner_type.__fields__[metadata.discriminant].default == objects_discriminant:
-                            return construct_type(object_=object_, type_=inner_type)
-                except Exception:
-                    # Allow to fall through to our regular union handling
-                    pass
-    return _convert_undiscriminated_union_type(union_type, object_)
-
-
-def construct_type(*, type_: typing.Type[typing.Any], object_: typing.Any) -> typing.Any:
-    """
-    Here we are essentially creating the same `construct` method in spirit as the above, but for all types, not just
-    Pydantic models.
-    The idea is to essentially attempt to coerce object_ to type_ (recursively)
-    """
-    # Short circuit when dealing with optionals; don't try to coerce None to a type
-    if object_ is None:
-        return None
-
-    base_type = get_origin(type_) or type_
-    is_annotated = base_type == typing_extensions.Annotated
-    maybe_annotation_members = get_args(type_)
-    is_annotated_union = is_annotated and is_union(get_origin(maybe_annotation_members[0]))
-
-    if base_type == typing.Any:
-        return object_
-
-    if base_type == dict:
-        if not isinstance(object_, typing.Mapping):
-            return object_
-
-        key_type, items_type = get_args(type_)
-        d = {
-            construct_type(object_=key, type_=key_type): construct_type(object_=item, type_=items_type)
-            for key, item in object_.items()
-        }
-        return d
-
-    if base_type == list:
-        if not isinstance(object_, list):
-            return object_
-
-        inner_type = get_args(type_)[0]
-        return [construct_type(object_=entry, type_=inner_type) for entry in object_]
-
-    if base_type == set:
-        if not isinstance(object_, set) and not isinstance(object_, list):
-            return object_
-
-        inner_type = get_args(type_)[0]
-        return {construct_type(object_=entry, type_=inner_type) for entry in object_}
-
-    if is_union(base_type) or is_annotated_union:
-        return _convert_union_type(type_, object_)
-
-    # Cannot do an `issubclass` with a literal type; let's also just confirm we have a class before this call
-    if (
-        object_ is not None
-        and not is_literal_type(type_)
-        and (
-            (inspect.isclass(base_type) and issubclass(base_type, pydantic.BaseModel))
-            or (
-                is_annotated
-                and inspect.isclass(maybe_annotation_members[0])
-                and issubclass(maybe_annotation_members[0], pydantic.BaseModel)
-            )
-        )
-    ):
-        if IS_PYDANTIC_V2:
-            return type_.model_construct(**object_)
-        else:
-            return type_.construct(**object_)
-
-    if base_type == dt.datetime:
-        try:
-            return parse_datetime(object_)
-        except Exception:
-            return object_
-
-    if base_type == dt.date:
-        try:
-            return parse_date(object_)
-        except Exception:
-            return object_
-
-    if base_type == uuid.UUID:
-        try:
-            return uuid.UUID(object_)
-        except Exception:
-            return object_
-
-    if base_type == int:
-        try:
-            return int(object_)
-        except Exception:
-            return object_
-
-    if base_type == bool:
-        try:
-            if isinstance(object_, str):
-                stringified_object = object_.lower()
-                return stringified_object == "true" or stringified_object == "1"
-
-            return bool(object_)
-        except Exception:
-            return object_
-
-    return object_
-
-
-def _get_is_populate_by_name(model: typing.Type["Model"]) -> bool:
-    if IS_PYDANTIC_V2:
-        return model.model_config.get("populate_by_name", False)  # type: ignore  # Pydantic v2
-    return model.__config__.allow_population_by_field_name  # type: ignore  # Pydantic v1
-
-
-PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo]
-
-
-# Pydantic V1 swapped the typing of __fields__'s values from ModelField to FieldInfo,
-# and so we try to handle both V1 cases, as well as V2 (FieldInfo from model.model_fields)
-def _get_model_fields(
-    model: typing.Type["Model"],
-) -> typing.Mapping[str, PydanticField]:
-    if IS_PYDANTIC_V2:
-        return model.model_fields  # type: ignore  # Pydantic v2
-    else:
-        return model.__fields__  # type: ignore  # Pydantic v1
-
-
-def _get_field_default(field: PydanticField) -> typing.Any:
-    try:
-        value = field.get_default()  # type: ignore  # Pydantic < v1.10.15
-    except:
-        value = field.default
-    if IS_PYDANTIC_V2:
-        from pydantic_core import PydanticUndefined
-
-        if value == PydanticUndefined:
-            return None
-        return value
-    return value
diff --git a/src/humanloop/datasets/__init__.py b/src/humanloop/datasets/__init__.py
deleted file mode 100644
index ff5c1227..00000000
--- a/src/humanloop/datasets/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-# isort: skip_file
-
-from .types import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
-
-__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"]
diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py
deleted file mode 100644
index 30c7d310..00000000
--- a/src/humanloop/datasets/client.py
+++ /dev/null
@@ -1,1330 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .. import core
-from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
-from ..core.pagination import AsyncPager, SyncPager
-from ..core.request_options import RequestOptions
-from ..requests.create_datapoint_request import CreateDatapointRequestParams
-from ..types.datapoint_response import DatapointResponse
-from ..types.dataset_response import DatasetResponse
-from ..types.file_environment_response import FileEnvironmentResponse
-from ..types.file_sort_by import FileSortBy
-from ..types.list_datasets import ListDatasets
-from ..types.sort_order import SortOrder
-from ..types.update_dateset_action import UpdateDatesetAction
-from .raw_client import AsyncRawDatasetsClient, RawDatasetsClient
-from .types.list_versions_datasets_id_versions_get_request_include_datapoints import (
-    ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints,
-)
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
-
-
-class DatasetsClient:
-    def __init__(self, *, client_wrapper: SyncClientWrapper):
-        self._raw_client = RawDatasetsClient(client_wrapper=client_wrapper)
-
-    @property
-    def with_raw_response(self) -> RawDatasetsClient:
-        """
-        Retrieves a raw implementation of this client that returns raw responses.
-
-        Returns
-        -------
-        RawDatasetsClient
-        """
-        return self._raw_client
-
-    def list(
-        self,
-        *,
-        page: typing.Optional[int] = None,
-        size: typing.Optional[int] = None,
-        name: typing.Optional[str] = None,
-        user_filter: typing.Optional[str] = None,
-        sort_by: typing.Optional[FileSortBy] = None,
-        order: typing.Optional[SortOrder] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> SyncPager[DatasetResponse]:
-        """
-        List all Datasets.
-
-        Parameters
-        ----------
-        page : typing.Optional[int]
-            Page offset for pagination.
-
-        size : typing.Optional[int]
-            Page size for pagination. Number of Datasets to fetch.
-
-        name : typing.Optional[str]
-            Case-insensitive filter for Dataset name.
-
-        user_filter : typing.Optional[str]
-            Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
-
-        sort_by : typing.Optional[FileSortBy]
-            Field to sort Datasets by.
-
-        order : typing.Optional[SortOrder]
-            Direction to sort by.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        SyncPager[DatasetResponse]
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        response = client.datasets.list(size=1, )
-        for item in response:
-            yield item
-        # alternatively, you can paginate page-by-page
-        for page in response.iter_pages():
-            yield page
-        """
-        return self._raw_client.list(
-            page=page,
-            size=size,
-            name=name,
-            user_filter=user_filter,
-            sort_by=sort_by,
-            order=order,
-            request_options=request_options,
-        )
-
-    def upsert(
-        self,
-        *,
-        datapoints: typing.Sequence[CreateDatapointRequestParams],
-        version_id: typing.Optional[str] = None,
-        environment: typing.Optional[str] = None,
-        include_datapoints: typing.Optional[bool] = None,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        action: typing.Optional[UpdateDatesetAction] = OMIT,
-        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        version_name: typing.Optional[str] = OMIT,
-        version_description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> DatasetResponse:
-        """
-        Create a Dataset or update it with a new version if it already exists.
-
-        Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
-
-        By default, the new Dataset version will be set to the list of Datapoints provided in
-        the request. You can also create a new version by adding or removing Datapoints from an existing version
-        by specifying `action` as `add` or `remove` respectively. In this case, you may specify
-        the `version_id` or `environment` query parameters to identify the existing version to base
-        the new version on. If neither is provided, the latest created version will be used.
-
-        You can provide `version_name` and `version_description` to identify and describe your versions.
-        Version names must be unique within a Dataset - attempting to create a version with a name
-        that already exists will result in a 409 Conflict error.
-
-        Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
-        exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
-        you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
-
-        Parameters
-        ----------
-        datapoints : typing.Sequence[CreateDatapointRequestParams]
-            The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
-
-        version_id : typing.Optional[str]
-            ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
-        environment : typing.Optional[str]
-            Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
-        include_datapoints : typing.Optional[bool]
-            If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
-        path : typing.Optional[str]
-            Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Dataset.
-
-        action : typing.Optional[UpdateDatesetAction]
-            The action to take with the provided Datapoints.
-
-            - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. - - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. - - If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Dataset version. Version names must be unique for a given Dataset. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.upsert(path='test-questions', datapoints=[{'inputs': {'question': 'What is the capital of France?'}, 'target': {'answer': 'Paris'}}, {'inputs': {'question': 'Who wrote Hamlet?'}, 'target': {'answer': 'William Shakespeare'}}], action="set", version_name='test-questions-v1', version_description='Add two new questions and answers', ) - """ - _response = self._raw_client.upsert( - datapoints=datapoints, - version_id=version_id, - environment=environment, - include_datapoints=include_datapoints, - path=path, - id=id, - action=action, - attributes=attributes, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Retrieve the Dataset with the given ID. - - Unless `include_datapoints` is set to `true`, the response will not include - the Datapoints. - Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently - retrieve Datapoints for a large Dataset. - - By default, the deployed version of the Dataset is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, ) - """ - _response = self._raw_client.get( - id, - version_id=version_id, - environment=environment, - include_datapoints=include_datapoints, - request_options=request_options, - ) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.delete(id='id', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Move the Dataset to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - path : typing.Optional[str] - Path of the Dataset including the Dataset name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Dataset, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.move(id='id', ) - """ - _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - def list_datapoints( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[DatapointResponse]: - """ - List all Datapoints for the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datapoints to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
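- - If you want explicit page boundaries rather than a flat iterator, `iter_pages()` (shown in the Examples below) can drive batch processing. An illustrative sketch; `handle_batch` is a hypothetical callback, not part of this SDK: - - response = client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=50) - for page in response.iter_pages(): - handle_batch(page)  # each page corresponds to one API response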
- - Returns - ------- - SyncPager[DatapointResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, ) - for item in response: - print(item) - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - print(page) - """ - return self._raw_client.list_datapoints( - id, version_id=version_id, environment=environment, page=page, size=size, request_options=request_options - ) - - def list_versions( - self, - id: str, - *, - include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListDatasets: - """ - Get a list of the versions for a Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] - If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListDatasets - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.list_versions(id='ds_b0baF1ca7652', ) - """ - _response = self._raw_client.list_versions( - id, include_datapoints=include_datapoints, request_options=request_options - ) - return _response.data - - def delete_dataset_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.delete_dataset_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.delete_dataset_version(id, version_id, request_options=request_options) - return _response.data - - def update_dataset_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Update the name or description of the Dataset version. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
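- - For instance, to retitle a version after review (an illustrative sketch reusing the example IDs from elsewhere in this file): - - client.datasets.update_dataset_version( - id='ds_b0baF1ca7652', - version_id='dsv_6L78pqrdFi2xa', - name='test-questions-v2', - description='Renamed after review', - )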
- - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.update_dataset_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.update_dataset_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - def upload_csv( - self, - id: str, - *, - file: core.File, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Add Datapoints from a CSV file to a Dataset. - - This will create a new version of the Dataset with the Datapoints from the CSV file. - - If either `version_id` or `environment` is provided, the new version will be based on the specified version, - with the Datapoints from the CSV file added to the existing Datapoints in the version. - If neither `version_id` nor `environment` is provided, the new version will be based on the version - of the Dataset that is deployed to the default Environment. - - You can optionally provide a name and description for the new version using `version_name` - and `version_description` parameters. - - Parameters - ---------- - id : str - Unique identifier for the Dataset - - file : core.File - See core.File for more documentation - - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. - - version_name : typing.Optional[str] - Name for the new Dataset version. - - version_description : typing.Optional[str] - Description for the new Dataset version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - with open('datapoints.csv', 'rb') as f:  # any binary file-like object works for `file` - client.datasets.upload_csv(id='id', file=f, ) - """ - _response = self._raw_client.upload_csv( - id, - file=file, - version_id=version_id, - environment=environment, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DatasetResponse: - """ - Deploy Dataset to Environment. - - Set the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
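- - A common follow-up is to confirm the deployment with `list_environments`; a sketch reusing the example IDs from this file: - - client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa') - for deployment in client.datasets.list_environments(id='ds_b0baF1ca7652'): - ...  # each item is a FileEnvironmentResponse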
- - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', ) - """ - _response = self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Dataset from Environment. - - Remove the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', ) - """ - _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.datasets.list_environments(id='id', ) - """ - _response = self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - -class AsyncDatasetsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawDatasetsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawDatasetsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawDatasetsClient - """ - return self._raw_client - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[DatasetResponse]: - """ - List all Datasets. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datasets to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Dataset name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Datasets by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncPager[DatasetResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.datasets.list(size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - async def upsert( - self, - *, - datapoints: typing.Sequence[CreateDatapointRequestParams], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - action: typing.Optional[UpdateDatesetAction] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Create a Dataset or update it with a new version if it already exists. - - Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. - - By default, the new Dataset version will be set to the list of Datapoints provided in - the request. You can also create a new version by adding or removing Datapoints from an existing version - by specifying `action` as `add` or `remove` respectively. In this case, you may specify - the `version_id` or `environment` query parameters to identify the existing version to base - the new version on. If neither is provided, the latest created version will be used. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Dataset - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already - exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, - you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. - - Parameters - ---------- - datapoints : typing.Sequence[CreateDatapointRequestParams] - The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. - - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - path : typing.Optional[str] - Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Dataset. - - action : typing.Optional[UpdateDatesetAction] - The action to take with the provided Datapoints.
- - - If `"set"`, the created version will only contain the Datapoints provided in this request. - - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. - - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. - - If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Dataset version. Version names must be unique for a given Dataset. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.upsert(path='test-questions', datapoints=[{'inputs': {'question': 'What is the capital of France?'}, 'target': {'answer': 'Paris'}}, {'inputs': {'question': 'Who wrote Hamlet?'}, 'target': {'answer': 'William Shakespeare'}}], action="set", version_name='test-questions-v1', version_description='Add two new questions and answers', ) - asyncio.run(main()) - """ - _response = await self._raw_client.upsert( - datapoints=datapoints, - version_id=version_id, - environment=environment, - include_datapoints=include_datapoints, - path=path, - id=id, - action=action, - attributes=attributes, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Retrieve the Dataset with the given ID. - - Unless `include_datapoints` is set to `true`, the response will not include - the Datapoints. - Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently - retrieve Datapoints for a large Dataset. - - By default, the deployed version of the Dataset is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, ) - asyncio.run(main()) - """ - _response = await self._raw_client.get( - id, - version_id=version_id, - environment=environment, - include_datapoints=include_datapoints, - request_options=request_options, - ) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.delete(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Move the Dataset to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - path : typing.Optional[str] - Path of the Dataset including the Dataset name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Dataset, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.move(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - async def list_datapoints( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[DatapointResponse]: - """ - List all Datapoints for the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datapoints to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
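- - The returned pager supports `async for`, so collecting every Datapoint into a list is a short comprehension inside a coroutine; an illustrative sketch: - - pager = await client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=100) - datapoints = [item async for item in pager]  # items are DatapointResponse objects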
- - Returns - ------- - AsyncPager[DatapointResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list_datapoints( - id, version_id=version_id, environment=environment, page=page, size=size, request_options=request_options - ) - - async def list_versions( - self, - id: str, - *, - include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListDatasets: - """ - Get a list of the versions for a Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] - If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListDatasets - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.list_versions(id='ds_b0baF1ca7652', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_versions( - id, include_datapoints=include_datapoints, request_options=request_options - ) - return _response.data - - async def delete_dataset_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.delete_dataset_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete_dataset_version(id, version_id, request_options=request_options) - return _response.data - - async def update_dataset_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Update the name or description of the Dataset version. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.update_dataset_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_dataset_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - async def upload_csv( - self, - id: str, - *, - file: core.File, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DatasetResponse: - """ - Add Datapoints from a CSV file to a Dataset. - - This will create a new version of the Dataset with the Datapoints from the CSV file. - - If either `version_id` or `environment` is provided, the new version will be based on the specified version, - with the Datapoints from the CSV file added to the existing Datapoints in the version. - If neither `version_id` nor `environment` is provided, the new version will be based on the version - of the Dataset that is deployed to the default Environment. - - You can optionally provide a name and description for the new version using `version_name` - and `version_description` parameters. - - Parameters - ---------- - id : str - Unique identifier for the Dataset - - file : core.File - See core.File for more documentation - - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. - - version_name : typing.Optional[str] - Name for the new Dataset version. - - version_description : typing.Optional[str] - Description for the new Dataset version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - with open('datapoints.csv', 'rb') as f:  # any binary file-like object works for `file` - await client.datasets.upload_csv(id='id', file=f, ) - asyncio.run(main()) - """ - _response = await self._raw_client.upload_csv( - id, - file=file, - version_id=version_id, - environment=environment, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> DatasetResponse: - """ - Deploy Dataset to Environment. - - Set the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - DatasetResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', ) - asyncio.run(main()) - """ - _response = await self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Dataset from Environment. - - Remove the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.datasets.list_environments(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_environments(id, request_options=request_options) - return _response.data diff --git a/src/humanloop/datasets/raw_client.py b/src/humanloop/datasets/raw_client.py deleted file mode 100644 index 774f04fc..00000000 --- a/src/humanloop/datasets/raw_client.py +++ /dev/null @@ -1,1924 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from .. 
import core -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.create_datapoint_request import CreateDatapointRequestParams -from ..types.datapoint_response import DatapointResponse -from ..types.dataset_response import DatasetResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.http_validation_error import HttpValidationError -from ..types.list_datasets import ListDatasets -from ..types.paginated_datapoint_response import PaginatedDatapointResponse -from ..types.paginated_dataset_response import PaginatedDatasetResponse -from ..types.sort_order import SortOrder -from ..types.update_dateset_action import UpdateDatesetAction -from .types.list_versions_datasets_id_versions_get_request_include_datapoints import ( - ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, -) - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class RawDatasetsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[DatasetResponse]: - """ - List all Datasets. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datasets to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Dataset name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Datasets by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SyncPager[DatasetResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "datasets", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDatasetResponse, - construct_type( - type_=PaginatedDatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upsert( - self, - *, - datapoints: typing.Sequence[CreateDatapointRequestParams], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - action: typing.Optional[UpdateDatesetAction] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DatasetResponse]: - """ - Create a Dataset or update it with a new version if it already exists. - - Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. - - By default, the new Dataset version will be set to the list of Datapoints provided in - the request. You can also create a new version by adding or removing Datapoints from an existing version - by specifying `action` as `add` or `remove` respectively. In this case, you may specify - the `version_id` or `environment` query parameters to identify the existing version to base - the new version on. If neither is provided, the latest created version will be used. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Dataset - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already - exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, - you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. - - Parameters - ---------- - datapoints : typing.Sequence[CreateDatapointRequestParams] - The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. 
- - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - path : typing.Optional[str] - Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Dataset. - - action : typing.Optional[UpdateDatesetAction] - The action to take with the provided Datapoints. - - - If `"set"`, the created version will only contain the Datapoints provided in this request. - - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. - - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. - - If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Dataset version. Version names must be unique for a given Dataset. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
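- - This raw method is reached via the client's `with_raw_response` property and returns the parsed `DatasetResponse` on `.data` (the attribute the high-level client unwraps); a sketch with the datapoints payload elided: - - raw = client.datasets.with_raw_response.upsert(path='test-questions', datapoints=[...]) - dataset = raw.data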
- - Returns - ------- - HttpResponse[DatasetResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "datasets", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, - json={ - "path": path, - "id": id, - "datapoints": convert_and_respect_annotation_metadata( - object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" - ), - "action": action, - "attributes": attributes, - "version_name": version_name, - "version_description": version_description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DatasetResponse]: - """ - Retrieve the Dataset with the given ID. - - Unless `include_datapoints` is set to `true`, the response will not include - the Datapoints. - Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently - retrieve Datapoints for a large Dataset. - - By default, the deployed version of the Dataset is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[DatasetResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DatasetResponse]: - """ - Move the Dataset to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - path : typing.Optional[str] - Path of the Dataset including the Dataset name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Dataset, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
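- - Through the high-level client this endpoint is exposed as `client.datasets.move`; a sketch that renames a Dataset using the example ID from this file (passing only `name` is assumed to leave the folder part of the path unchanged): - - client.datasets.move(id='ds_b0baF1ca7652', name='test-questions-renamed')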
- - Returns - ------- - HttpResponse[DatasetResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_datapoints( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[DatapointResponse]: - """ - List all Datapoints for the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datapoints to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SyncPager[DatapointResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/datapoints", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - "page": page, - "size": size, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDatapointResponse, - construct_type( - type_=PaginatedDatapointResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list_datapoints( - id, - version_id=version_id, - environment=environment, - page=page + 1, - size=size, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_versions( - self, - id: str, - *, - include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ListDatasets]: - """ - Get a list of the versions for a Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] - If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ListDatasets] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "include_datapoints": include_datapoints, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListDatasets, - construct_type( - type_=ListDatasets, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_dataset_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Delete a version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. 
- - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_dataset_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DatasetResponse]: - """ - Update the name or description of the Dataset version. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[DatasetResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upload_csv( - self, - id: str, - *, - file: core.File, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DatasetResponse]: - """ - Add Datapoints from a CSV file to a Dataset. - - This will create a new version of the Dataset with the Datapoints from the CSV file. 
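- - Through the high-level client, the call shape might look like the following sketch (the file name is hypothetical; any binary file-like object accepted by `core.File` should work): - - with open('datapoints.csv', 'rb') as f: - client.datasets.upload_csv(id='ds_b0baF1ca7652', file=f, version_name='from-csv')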
- - If either `version_id` or `environment` is provided, the new version will be based on the specified version, - with the Datapoints from the CSV file added to the existing Datapoints in the version. - If neither `version_id` nor `environment` is provided, the new version will be based on the version - of the Dataset that is deployed to the default Environment. - - You can optionally provide a name and description for the new version using `version_name` - and `version_description` parameters. - - Parameters - ---------- - id : str - Unique identifier for the Dataset - - file : core.File - See core.File for more documentation - - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. - - version_name : typing.Optional[str] - Name for the new Dataset version. - - version_description : typing.Optional[str] - Description for the new Dataset version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[DatasetResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/datapoints/csv", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - data={ - "version_name": version_name, - "version_description": version_description, - }, - files={ - "file": file, - }, - headers={ - "content-type": "multipart/form-data", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[DatasetResponse]: - """ - Deploy Dataset to Environment. - - Set the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[DatasetResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove deployed Dataset from Environment. - - Remove the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawDatasetsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[DatasetResponse]: - """ - List all Datasets. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datasets to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Dataset name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Datasets by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncPager[DatasetResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "datasets", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDatasetResponse, - construct_type( - type_=PaginatedDatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def upsert( - self, - *, - datapoints: typing.Sequence[CreateDatapointRequestParams], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - action: typing.Optional[UpdateDatesetAction] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DatasetResponse]: - """ - Create a Dataset or update it with a new version if it already exists. - - Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. - - By default, the new Dataset version will be set to the list of Datapoints provided in - the request. You can also create a new version by adding or removing Datapoints from an existing version - by specifying `action` as `add` or `remove` respectively. In this case, you may specify - the `version_id` or `environment` query parameters to identify the existing version to base - the new version on. If neither is provided, the latest created version will be used. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Dataset - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already - exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, - you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. - - Parameters - ---------- - datapoints : typing.Sequence[CreateDatapointRequestParams] - The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. 
- - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - path : typing.Optional[str] - Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Dataset. - - action : typing.Optional[UpdateDatesetAction] - The action to take with the provided Datapoints. - - - If `"set"`, the created version will only contain the Datapoints provided in this request. - - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. - - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. - - If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Dataset version. Version names must be unique for a given Dataset. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[DatasetResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "datasets", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, - json={ - "path": path, - "id": id, - "datapoints": convert_and_respect_annotation_metadata( - object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" - ), - "action": action, - "attributes": attributes, - "version_name": version_name, - "version_description": version_description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - include_datapoints: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DatasetResponse]: - """ - Retrieve the Dataset with the given ID. - - Unless `include_datapoints` is set to `true`, the response will not include - the Datapoints. - Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently - retrieve Datapoints for a large Dataset. - - By default, the deployed version of the Dataset is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_datapoints : typing.Optional[bool] - If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[DatasetResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DatasetResponse]: - """ - Move the Dataset to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - path : typing.Optional[str] - Path of the Dataset including the Dataset name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Dataset, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[DatasetResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_datapoints( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[DatapointResponse]: - """ - List all Datapoints for the Dataset with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : typing.Optional[str] - A specific Version ID of the Dataset to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Datapoints to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncPager[DatapointResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/datapoints", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - "page": page, - "size": size, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDatapointResponse, - construct_type( - type_=PaginatedDatapointResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list_datapoints( - id, - version_id=version_id, - environment=environment, - page=page + 1, - size=size, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_versions( - self, - id: str, - *, - include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ListDatasets]: - """ - Get a list of the versions for a Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] - If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[ListDatasets] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "include_datapoints": include_datapoints, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListDatasets, - construct_type( - type_=ListDatasets, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_dataset_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete a version of the Dataset. 
- - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_dataset_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DatasetResponse]: - """ - Update the name or description of the Dataset version. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - version_id : str - Unique identifier for the specific version of the Dataset. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[DatasetResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def upload_csv( - self, - id: str, - *, - file: core.File, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DatasetResponse]: - """ - Add Datapoints from a CSV file to a Dataset. - - This will create a new version of the Dataset with the Datapoints from the CSV file. 
- - If either `version_id` or `environment` is provided, the new version will be based on the specified version, - with the Datapoints from the CSV file added to the existing Datapoints in the version. - If neither `version_id` nor `environment` is provided, the new version will be based on the version - of the Dataset that is deployed to the default Environment. - - You can optionally provide a name and description for the new version using `version_name` - and `version_description` parameters. - - Parameters - ---------- - id : str - Unique identifier for the Dataset - - file : core.File - See core.File for more documentation - - version_id : typing.Optional[str] - ID of the specific Dataset version to base the created Version on. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed Version to base the created Version on. - - version_name : typing.Optional[str] - Name for the new Dataset version. - - version_description : typing.Optional[str] - Description for the new Dataset version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[DatasetResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/datapoints/csv", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - data={ - "version_name": version_name, - "version_description": version_description, - }, - files={ - "file": file, - }, - headers={ - "content-type": "multipart/form-data", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[DatasetResponse]: - """ - Deploy Dataset to Environment. - - Set the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[DatasetResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove deployed Dataset from Environment. - - Remove the deployed version for the specified Environment. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Dataset. - - Parameters - ---------- - id : str - Unique identifier for Dataset. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/datasets/types/__init__.py b/src/humanloop/datasets/types/__init__.py deleted file mode 100644 index 419263e1..00000000 --- a/src/humanloop/datasets/types/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .list_versions_datasets_id_versions_get_request_include_datapoints import ( - ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, -) - -__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"] diff --git a/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py b/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py deleted file mode 100644 index 6c04f917..00000000 --- a/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints = typing.Union[ - typing.Literal["latest_committed", "latest_saved"], typing.Any -] diff --git a/src/humanloop/directories/__init__.py b/src/humanloop/directories/__init__.py deleted file mode 100644 index 5cde0202..00000000 --- a/src/humanloop/directories/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - diff --git a/src/humanloop/directories/client.py b/src/humanloop/directories/client.py deleted file mode 100644 index 62972278..00000000 --- a/src/humanloop/directories/client.py +++ /dev/null @@ -1,385 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.request_options import RequestOptions -from ..types.directory_response import DirectoryResponse -from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse -from .raw_client import AsyncRawDirectoriesClient, RawDirectoriesClient - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
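The `OMIT = typing.cast(typing.Any, ...)` sentinel defined just above is how these generated clients tell "argument never passed" apart from an explicit `None`, so unpassed optional fields can be dropped from the JSON body instead of being serialized as `null`. A minimal, self-contained sketch of the same pattern; `build_payload` and its field names are illustrative only, not part of the SDK:

import typing

# Ellipsis is a singleton, so identity checks against OMIT are reliable;
# the cast lets type checkers accept OMIT as the default for any parameter type.
OMIT = typing.cast(typing.Any, ...)


def build_payload(
    name: typing.Optional[str] = OMIT,
    parent_id: typing.Optional[str] = OMIT,
) -> typing.Dict[str, typing.Any]:
    # Keep only the fields the caller actually supplied. An explicit None
    # survives (and would serialize as JSON null); an unpassed argument is dropped.
    fields = {"name": name, "parent_id": parent_id}
    return {key: value for key, value in fields.items() if value is not OMIT}


assert build_payload() == {}
assert build_payload(name=None) == {"name": None}
assert build_payload(name="docs", parent_id="dir_123") == {"name": "docs", "parent_id": "dir_123"}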
- - -class DirectoriesClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawDirectoriesClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawDirectoriesClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - RawDirectoriesClient - """ - return self._raw_client - - def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[DirectoryResponse]: - """ - Retrieve a list of all Directories. - - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[DirectoryResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.directories.list() - """ - _response = self._raw_client.list(request_options=request_options) - return _response.data - - def create( - self, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DirectoryResponse: - """ - Creates a Directory. - - Parameters - ---------- - name : typing.Optional[str] - Name of the directory to create. - - parent_id : typing.Optional[str] - ID of the parent directory. Starts with `dir_`. - - path : typing.Optional[str] - Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DirectoryResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.directories.create() - """ - _response = self._raw_client.create(name=name, parent_id=parent_id, path=path, request_options=request_options) - return _response.data - - def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> DirectoryWithParentsAndChildrenResponse: - """ - Fetches a directory by ID. - - Parameters - ---------- - id : str - String ID of directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DirectoryWithParentsAndChildrenResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.directories.get(id='id', ) - """ - _response = self._raw_client.get(id, request_options=request_options) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Directory with the given ID. - - The Directory must be empty (i.e. contain no Directories or Files). - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.directories.delete(id='id', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def update( - self, - id: str, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DirectoryResponse: - """ - Update the Directory with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - name : typing.Optional[str] - Name to set for the directory. - - parent_id : typing.Optional[str] - ID of the parent directory. Specify this to move directories. Starts with `dir_`. - - path : typing.Optional[str] - Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DirectoryResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.directories.update(id='id', ) - """ - _response = self._raw_client.update( - id, name=name, parent_id=parent_id, path=path, request_options=request_options - ) - return _response.data - - -class AsyncDirectoriesClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawDirectoriesClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawDirectoriesClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawDirectoriesClient - """ - return self._raw_client - - async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[DirectoryResponse]: - """ - Retrieve a list of all Directories. - - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[DirectoryResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.directories.list() - asyncio.run(main()) - """ - _response = await self._raw_client.list(request_options=request_options) - return _response.data - - async def create( - self, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DirectoryResponse: - """ - Creates a Directory. - - Parameters - ---------- - name : typing.Optional[str] - Name of the directory to create. - - parent_id : typing.Optional[str] - ID of the parent directory. Starts with `dir_`. - - path : typing.Optional[str] - Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - DirectoryResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.directories.create() - asyncio.run(main()) - """ - _response = await self._raw_client.create( - name=name, parent_id=parent_id, path=path, request_options=request_options - ) - return _response.data - - async def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> DirectoryWithParentsAndChildrenResponse: - """ - Fetches a directory by ID. - - Parameters - ---------- - id : str - String ID of directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DirectoryWithParentsAndChildrenResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.directories.get(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get(id, request_options=request_options) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Directory with the given ID. - - The Directory must be empty (i.e. contain no Directories or Files). - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.directories.delete(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def update( - self, - id: str, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> DirectoryResponse: - """ - Update the Directory with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - name : typing.Optional[str] - Name to set for the directory. - - parent_id : typing.Optional[str] - ID of the parent directory. Specify this to move directories. Starts with `dir_`. - - path : typing.Optional[str] - Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - DirectoryResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.directories.update(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update( - id, name=name, parent_id=parent_id, path=path, request_options=request_options - ) - return _response.data diff --git a/src/humanloop/directories/raw_client.py b/src/humanloop/directories/raw_client.py deleted file mode 100644 index e2f10091..00000000 --- a/src/humanloop/directories/raw_client.py +++ /dev/null @@ -1,596 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.request_options import RequestOptions -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.directory_response import DirectoryResponse -from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse -from ..types.http_validation_error import HttpValidationError - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class RawDirectoriesClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def list( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[DirectoryResponse]]: - """ - Retrieve a list of all Directories. - - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[typing.List[DirectoryResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "directories", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[DirectoryResponse], - construct_type( - type_=typing.List[DirectoryResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def create( - self, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DirectoryResponse]: - """ - Creates a Directory. - - Parameters - ---------- - name : typing.Optional[str] - Name of the directory to create. - - parent_id : typing.Optional[str] - ID of the parent directory. Starts with `dir_`. - - path : typing.Optional[str] - Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[DirectoryResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "directories", - method="POST", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[DirectoryWithParentsAndChildrenResponse]: - """ - Fetches a directory by ID. - - Parameters - ---------- - id : str - String ID of directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[DirectoryWithParentsAndChildrenResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DirectoryWithParentsAndChildrenResponse, - construct_type( - type_=DirectoryWithParentsAndChildrenResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Directory with the given ID. - - The Directory must be empty (i.e. contain no Directories or Files). - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update( - self, - id: str, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[DirectoryResponse]: - """ - Update the Directory with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - name : typing.Optional[str] - Name to set for the directory. - - parent_id : typing.Optional[str] - ID of the parent directory. Specify this to move directories. Starts with `dir_`. - - path : typing.Optional[str] - Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[DirectoryResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="PATCH", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawDirectoriesClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def list( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[DirectoryResponse]]: - """ - Retrieve a list of all Directories. - - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[typing.List[DirectoryResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "directories", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[DirectoryResponse], - construct_type( - type_=typing.List[DirectoryResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def create( - self, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DirectoryResponse]: - """ - Creates a Directory. - - Parameters - ---------- - name : typing.Optional[str] - Name of the directory to create. - - parent_id : typing.Optional[str] - ID of the parent directory. Starts with `dir_`. - - path : typing.Optional[str] - Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[DirectoryResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "directories", - method="POST", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[DirectoryWithParentsAndChildrenResponse]: - """ - Fetches a directory by ID. - - Parameters - ---------- - id : str - String ID of directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[DirectoryWithParentsAndChildrenResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DirectoryWithParentsAndChildrenResponse, - construct_type( - type_=DirectoryWithParentsAndChildrenResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Directory with the given ID. - - The Directory must be empty (i.e. contain no Directories or Files). - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update( - self, - id: str, - *, - name: typing.Optional[str] = OMIT, - parent_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[DirectoryResponse]: - """ - Update the Directory with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Directory. Starts with `dir_`. - - name : typing.Optional[str] - Name to set for the directory. - - parent_id : typing.Optional[str] - ID of the parent directory. Specify this to move directories. Starts with `dir_`. - - path : typing.Optional[str] - Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[DirectoryResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="PATCH", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/environment.py b/src/humanloop/environment.py deleted file mode 100644 index b9263608..00000000 --- a/src/humanloop/environment.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import enum - - -class HumanloopEnvironment(enum.Enum): - DEFAULT = "https://api.humanloop.com/v5" diff --git a/src/humanloop/errors/__init__.py b/src/humanloop/errors/__init__.py deleted file mode 100644 index 67183e01..00000000 --- a/src/humanloop/errors/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .unprocessable_entity_error import UnprocessableEntityError - -__all__ = ["UnprocessableEntityError"] diff --git a/src/humanloop/errors/unprocessable_entity_error.py b/src/humanloop/errors/unprocessable_entity_error.py deleted file mode 100644 index d3f9c5d8..00000000 --- a/src/humanloop/errors/unprocessable_entity_error.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..core.api_error import ApiError -from ..types.http_validation_error import HttpValidationError - - -class UnprocessableEntityError(ApiError): - def __init__(self, body: HttpValidationError, headers: typing.Optional[typing.Dict[str, str]] = None): - super().__init__(status_code=422, headers=headers, body=body) diff --git a/src/humanloop/evaluations/__init__.py b/src/humanloop/evaluations/__init__.py deleted file mode 100644 index 3498bb70..00000000 --- a/src/humanloop/evaluations/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -# isort: skip_file - -from .types import ( - AddEvaluatorsRequestEvaluatorsItem, - CreateEvaluationRequestEvaluatorsItem, - CreateRunRequestDataset, - CreateRunRequestVersion, -) -from .requests import ( - AddEvaluatorsRequestEvaluatorsItemParams, - CreateEvaluationRequestEvaluatorsItemParams, - CreateRunRequestDatasetParams, - CreateRunRequestVersionParams, -) - -__all__ = [ - "AddEvaluatorsRequestEvaluatorsItem", - "AddEvaluatorsRequestEvaluatorsItemParams", - "CreateEvaluationRequestEvaluatorsItem", - "CreateEvaluationRequestEvaluatorsItemParams", - "CreateRunRequestDataset", - "CreateRunRequestDatasetParams", - "CreateRunRequestVersion", - "CreateRunRequestVersionParams", -] diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py deleted file mode 100644 index 006fb99b..00000000 --- a/src/humanloop/evaluations/client.py +++ /dev/null @@ -1,1177 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..requests.file_request import FileRequestParams -from ..types.evaluation_response import EvaluationResponse -from ..types.evaluation_run_response import EvaluationRunResponse -from ..types.evaluation_runs_response import EvaluationRunsResponse -from ..types.evaluation_stats import EvaluationStats -from ..types.evaluation_status import EvaluationStatus -from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse -from .raw_client import AsyncRawEvaluationsClient, RawEvaluationsClient -from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams -from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams -from .requests.create_run_request_dataset import CreateRunRequestDatasetParams -from .requests.create_run_request_version import CreateRunRequestVersionParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class EvaluationsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawEvaluationsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawEvaluationsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - RawEvaluationsClient - """ - return self._raw_client - - def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[EvaluationResponse]: - """ - Retrieve a list of Evaluations for the specified File. - - Parameters - ---------- - file_id : str - Filter by File ID. Only Evaluations for the specified File will be returned. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluations to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SyncPager[EvaluationResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, ) - for item in response: - print(item) - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - print(page) - """ - return self._raw_client.list(file_id=file_id, page=page, size=size, request_options=request_options) - - def create( - self, - *, - evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - file: typing.Optional[FileRequestParams] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationResponse: - """ - Create an Evaluation. - - Create a new Evaluation by specifying the File to evaluate, and a name - for the Evaluation. - You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. - - Parameters - ---------- - evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] - The Evaluators used to evaluate. - - file : typing.Optional[FileRequestParams] - The File to associate with the Evaluation. This File contains the Logs you're evaluating. - - name : typing.Optional[str] - Name of the Evaluation to help identify it. Must be unique within the associated File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.create(evaluators=[{'version_id': 'version_id'}], ) - """ - _response = self._raw_client.create( - evaluators=evaluators, file=file, name=name, request_options=request_options - ) - return _response.data - - def add_evaluators( - self, - id: str, - *, - evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationResponse: - """ - Add Evaluators to an Evaluation. - - The Evaluators will be run on the Logs generated for the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] - The Evaluators to add to this Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], ) - """ - _response = self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options) - return _response.data - - def remove_evaluator( - self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluationResponse: - """ - Remove an Evaluator from an Evaluation. - - The Evaluator will no longer be run on the Logs in the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluator_version_id : str - Unique identifier for Evaluator Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', ) - """ - _response = self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options) - return _response.data - - def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse: - """ - Get an Evaluation. - - This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, - such as its name. - - To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. - To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.get(id='ev_567yza', ) - """ - _response = self._raw_client.get(id, request_options=request_options) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete an Evaluation. - - The Runs and Evaluators in the Evaluation will not be deleted. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.delete(id='ev_567yza', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def list_runs_for_evaluation( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluationRunsResponse: - """ - List all Runs for an Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationRunsResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.list_runs_for_evaluation(id='id', ) - """ - _response = self._raw_client.list_runs_for_evaluation(id, request_options=request_options) - return _response.data - - def create_run( - self, - id: str, - *, - dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, - version: typing.Optional[CreateRunRequestVersionParams] = OMIT, - orchestrated: typing.Optional[bool] = OMIT, - use_existing_logs: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationRunResponse: - """ - Create an Evaluation Run. - - Optionally specify the Dataset and version to be evaluated. - - Humanloop will automatically start generating Logs and running Evaluators where - `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` - and then generate and submit the required Logs via the API. - - If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, - avoiding generating new Logs unnecessarily. 
Logs that are associated with the specified Version and have `source_datapoint_id` - referencing a datapoint in the specified Dataset will be associated with the Run. - - To keep updated on the progress of the Run, you can poll the Run using - the `GET /evaluations/{id}/runs` endpoint and check its status. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - dataset : typing.Optional[CreateRunRequestDatasetParams] - Dataset to use in this Run. - - version : typing.Optional[CreateRunRequestVersionParams] - Version to use in this Run. - - orchestrated : typing.Optional[bool] - Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - - use_existing_logs : typing.Optional[bool] - If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationRunResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.create_run(id='id', ) - """ - _response = self._raw_client.create_run( - id, - dataset=dataset, - version=version, - orchestrated=orchestrated, - use_existing_logs=use_existing_logs, - request_options=request_options, - ) - return _response.data - - def add_existing_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Add an existing Run to the specified Evaluation. - - This is useful if you want to compare the Runs in this Evaluation with an existing Run - that exists within another Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.add_existing_run(id='id', run_id='run_id', ) - """ - _response = self._raw_client.add_existing_run(id, run_id, request_options=request_options) - return _response.data - - def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Remove a Run from an Evaluation. - - The Logs and Versions used in the Run will not be deleted. - If this Run is used in any other Evaluations, it will still be available in those Evaluations. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.remove_run(id='id', run_id='run_id', ) - """ - _response = self._raw_client.remove_run(id, run_id, request_options=request_options) - return _response.data - - def update_evaluation_run( - self, - id: str, - run_id: str, - *, - control: typing.Optional[bool] = OMIT, - status: typing.Optional[EvaluationStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationRunResponse: - """ - Update an Evaluation Run. - - Specify `control=true` to use this Run as the control Run for the Evaluation. - You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - control : typing.Optional[bool] - If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. - - status : typing.Optional[EvaluationStatus] - Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationRunResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.update_evaluation_run(id='id', run_id='run_id', ) - """ - _response = self._raw_client.update_evaluation_run( - id, run_id, control=control, status=status, request_options=request_options - ) - return _response.data - - def add_logs_to_run( - self, - id: str, - run_id: str, - *, - log_ids: typing.Sequence[str], - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationRunResponse: - """ - Add the specified Logs to a Run. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - log_ids : typing.Sequence[str] - The IDs of the Logs to add to the Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationRunResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], ) - """ - _response = self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options) - return _response.data - - def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats: - """ - Get Evaluation Stats. - - Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the - corresponding Evaluator statistics (such as the mean and percentiles). - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EvaluationStats - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.get_stats(id='id', ) - """ - _response = self._raw_client.get_stats(id, request_options=request_options) - return _response.data - - def get_logs( - self, - id: str, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataEvaluationLogResponse: - """ - Get the Logs associated with a specific Evaluation. - - This returns the Logs associated with all Runs within the Evaluation. - - Parameters - ---------- - id : str - String ID of evaluation. Starts with `ev_` or `evr_`. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Filter by Run IDs. Only Logs for the specified Runs will be returned. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PaginatedDataEvaluationLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluations.get_logs(id='id', ) - """ - _response = self._raw_client.get_logs(id, page=page, size=size, run_id=run_id, request_options=request_options) - return _response.data - - -class AsyncEvaluationsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawEvaluationsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawEvaluationsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawEvaluationsClient - """ - return self._raw_client - - async def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[EvaluationResponse]: - """ - Retrieve a list of Evaluations for the specified File. - - Parameters - ---------- - file_id : str - Filter by File ID. Only Evaluations for the specified File will be returned. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluations to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncPager[EvaluationResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list(file_id=file_id, page=page, size=size, request_options=request_options) - - async def create( - self, - *, - evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - file: typing.Optional[FileRequestParams] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationResponse: - """ - Create an Evaluation. - - Create a new Evaluation by specifying the File to evaluate, and a name - for the Evaluation. - You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. - - Parameters - ---------- - evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] - The Evaluators used to evaluate. - - file : typing.Optional[FileRequestParams] - The File to associate with the Evaluation. This File contains the Logs you're evaluating. - - name : typing.Optional[str] - Name of the Evaluation to help identify it. Must be unique within the associated File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.create(evaluators=[{'version_id': 'version_id'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.create( - evaluators=evaluators, file=file, name=name, request_options=request_options - ) - return _response.data - - async def add_evaluators( - self, - id: str, - *, - evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationResponse: - """ - Add Evaluators to an Evaluation. - - The Evaluators will be run on the Logs generated for the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] - The Evaluators to add to this Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options) - return _response.data - - async def remove_evaluator( - self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluationResponse: - """ - Remove an Evaluator from an Evaluation. - - The Evaluator will no longer be run on the Logs in the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation.
- - evaluator_version_id : str - Unique identifier for Evaluator Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options) - return _response.data - - async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse: - """ - Get an Evaluation. - - This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, - such as its name. - - To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. - To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.get(id='ev_567yza', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get(id, request_options=request_options) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete an Evaluation. - - The Runs and Evaluators in the Evaluation will not be deleted. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.delete(id='ev_567yza', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def list_runs_for_evaluation( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluationRunsResponse: - """ - List all Runs for an Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EvaluationRunsResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.list_runs_for_evaluation(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_runs_for_evaluation(id, request_options=request_options) - return _response.data - - async def create_run( - self, - id: str, - *, - dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, - version: typing.Optional[CreateRunRequestVersionParams] = OMIT, - orchestrated: typing.Optional[bool] = OMIT, - use_existing_logs: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationRunResponse: - """ - Create an Evaluation Run. - - Optionally specify the Dataset and version to be evaluated. - - Humanloop will automatically start generating Logs and running Evaluators where - `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` - and then generate and submit the required Logs via the API. - - If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, - avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` - referencing a datapoint in the specified Dataset will be associated with the Run. - - To keep updated on the progress of the Run, you can poll the Run using - the `GET /evaluations/{id}/runs` endpoint and check its status. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - dataset : typing.Optional[CreateRunRequestDatasetParams] - Dataset to use in this Run. - - version : typing.Optional[CreateRunRequestVersionParams] - Version to use in this Run. - - orchestrated : typing.Optional[bool] - Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - - use_existing_logs : typing.Optional[bool] - If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationRunResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.create_run(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.create_run( - id, - dataset=dataset, - version=version, - orchestrated=orchestrated, - use_existing_logs=use_existing_logs, - request_options=request_options, - ) - return _response.data - - async def add_existing_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Add an existing Run to the specified Evaluation. - - This is useful if you want to compare the Runs in this Evaluation with an existing Run - that exists within another Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.add_existing_run(id='id', run_id='run_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.add_existing_run(id, run_id, request_options=request_options) - return _response.data - - async def remove_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove a Run from an Evaluation. - - The Logs and Versions used in the Run will not be deleted. - If this Run is used in any other Evaluations, it will still be available in those Evaluations. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.remove_run(id='id', run_id='run_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_run(id, run_id, request_options=request_options) - return _response.data - - async def update_evaluation_run( - self, - id: str, - run_id: str, - *, - control: typing.Optional[bool] = OMIT, - status: typing.Optional[EvaluationStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationRunResponse: - """ - Update an Evaluation Run. - - Specify `control=true` to use this Run as the control Run for the Evaluation. - You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - control : typing.Optional[bool] - If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. - - status : typing.Optional[EvaluationStatus] - Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationRunResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.update_evaluation_run(id='id', run_id='run_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_evaluation_run( - id, run_id, control=control, status=status, request_options=request_options - ) - return _response.data - - async def add_logs_to_run( - self, - id: str, - run_id: str, - *, - log_ids: typing.Sequence[str], - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluationRunResponse: - """ - Add the specified Logs to a Run. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - log_ids : typing.Sequence[str] - The IDs of the Logs to add to the Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EvaluationRunResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], ) - asyncio.run(main()) - """ - _response = await self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options) - return _response.data - - async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats: - """ - Get Evaluation Stats. - - Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the - corresponding Evaluator statistics (such as the mean and percentiles). - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluationStats - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.get_stats(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get_stats(id, request_options=request_options) - return _response.data - - async def get_logs( - self, - id: str, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataEvaluationLogResponse: - """ - Get the Logs associated with a specific Evaluation. - - This returns the Logs associated with all Runs within the Evaluation. - - Parameters - ---------- - id : str - String ID of evaluation. Starts with `ev_` or `evr_`. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Filter by Run IDs. Only Logs for the specified Runs will be returned. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PaginatedDataEvaluationLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluations.get_logs(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get_logs( - id, page=page, size=size, run_id=run_id, request_options=request_options - ) - return _response.data diff --git a/src/humanloop/evaluations/raw_client.py b/src/humanloop/evaluations/raw_client.py deleted file mode 100644 index 85c3dbf3..00000000 --- a/src/humanloop/evaluations/raw_client.py +++ /dev/null @@ -1,1845 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
- -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.file_request import FileRequestParams -from ..types.evaluation_response import EvaluationResponse -from ..types.evaluation_run_response import EvaluationRunResponse -from ..types.evaluation_runs_response import EvaluationRunsResponse -from ..types.evaluation_stats import EvaluationStats -from ..types.evaluation_status import EvaluationStatus -from ..types.http_validation_error import HttpValidationError -from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse -from ..types.paginated_evaluation_response import PaginatedEvaluationResponse -from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams -from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams -from .requests.create_run_request_dataset import CreateRunRequestDatasetParams -from .requests.create_run_request_version import CreateRunRequestVersionParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class RawEvaluationsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[EvaluationResponse]: - """ - Retrieve a list of Evaluations for the specified File. - - Parameters - ---------- - file_id : str - Filter by File ID. Only Evaluations for the specified File will be returned. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluations to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SyncPager[EvaluationResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "evaluations", - method="GET", - params={ - "file_id": file_id, - "page": page, - "size": size, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedEvaluationResponse, - construct_type( - type_=PaginatedEvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - file_id=file_id, - page=page + 1, - size=size, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def create( - self, - *, - evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - file: typing.Optional[FileRequestParams] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluationResponse]: - """ - Create an Evaluation. - - Create a new Evaluation by specifying the File to evaluate, and a name - for the Evaluation. - You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. - - Parameters - ---------- - evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] - The Evaluators used to evaluate. - - file : typing.Optional[FileRequestParams] - The File to associate with the Evaluation. This File contains the Logs you're evaluating. - - name : typing.Optional[str] - Name of the Evaluation to help identify it. Must be unique within the associated File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[EvaluationResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "evaluations", - method="POST", - json={ - "file": convert_and_respect_annotation_metadata( - object_=file, annotation=FileRequestParams, direction="write" - ), - "name": name, - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def add_evaluators( - self, - id: str, - *, - evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluationResponse]: - """ - Add Evaluators to an Evaluation. - - The Evaluators will be run on the Logs generated for the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] - The Evaluators to add to this Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluationResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_evaluator( - self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[EvaluationResponse]: - """ - Remove an Evaluator from an Evaluation. 
- - The Evaluator will no longer be run on the Logs in the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluator_version_id : str - Unique identifier for Evaluator Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluationResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[EvaluationResponse]: - """ - Get an Evaluation. - - This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, - such as its name. - - To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. - To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluationResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete an Evaluation. - - The Runs and Evaluators in the Evaluation will not be deleted. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_runs_for_evaluation( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[EvaluationRunsResponse]: - """ - List all Runs for an Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluationRunsResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunsResponse, - construct_type( - type_=EvaluationRunsResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def create_run( - self, - id: str, - *, - dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, - version: typing.Optional[CreateRunRequestVersionParams] = OMIT, - orchestrated: typing.Optional[bool] = OMIT, - use_existing_logs: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluationRunResponse]: - """ - Create an Evaluation Run. - - Optionally specify the Dataset and version to be evaluated. - - Humanloop will automatically start generating Logs and running Evaluators where - `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` - and then generate and submit the required Logs via the API. - - If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, - avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` - referencing a datapoint in the specified Dataset will be associated with the Run. - - To keep updated on the progress of the Run, you can poll the Run using - the `GET /evaluations/{id}/runs` endpoint and check its status. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - dataset : typing.Optional[CreateRunRequestDatasetParams] - Dataset to use in this Run. 
- - version : typing.Optional[CreateRunRequestVersionParams] - Version to use in this Run. - - orchestrated : typing.Optional[bool] - Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - - use_existing_logs : typing.Optional[bool] - If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluationRunResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="POST", - json={ - "dataset": convert_and_respect_annotation_metadata( - object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" - ), - "version": convert_and_respect_annotation_metadata( - object_=version, annotation=CreateRunRequestVersionParams, direction="write" - ), - "orchestrated": orchestrated, - "use_existing_logs": use_existing_logs, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def add_existing_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.Optional[typing.Any]]: - """ - Add an existing Run to the specified Evaluation. - - This is useful if you want to compare the Runs in this Evaluation with an existing Run - that exists within another Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[typing.Optional[typing.Any]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove a Run from an Evaluation. - - The Logs and Versions used in the Run will not be deleted. - If this Run is used in any other Evaluations, it will still be available in those Evaluations. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_evaluation_run( - self, - id: str, - run_id: str, - *, - control: typing.Optional[bool] = OMIT, - status: typing.Optional[EvaluationStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluationRunResponse]: - """ - Update an Evaluation Run. - - Specify `control=true` to use this Run as the control Run for the Evaluation. - You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - control : typing.Optional[bool] - If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. - - status : typing.Optional[EvaluationStatus] - Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
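A short sketch of the cross-Evaluation comparison flow that `add_existing_run` and `remove_run` enable (IDs are placeholders; the high-level wrapper is assumed to mirror the raw methods shown here):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Borrow a Run that lives in another Evaluation for side-by-side comparison.
client.evaluations.add_existing_run(id="ev_567yza", run_id="evr_890bcd")

# Detach it again later: the Run's Logs and Versions are not deleted, and
# the Run remains available in the Evaluation it originally came from.
client.evaluations.remove_run(id="ev_567yza", run_id="evr_890bcd")
```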
- - Returns - ------- - HttpResponse[EvaluationRunResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="PATCH", - json={ - "control": control, - "status": status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def add_logs_to_run( - self, - id: str, - run_id: str, - *, - log_ids: typing.Sequence[str], - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluationRunResponse]: - """ - Add the specified Logs to a Run. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - log_ids : typing.Sequence[str] - The IDs of the Logs to add to the Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluationRunResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", - method="POST", - json={ - "log_ids": log_ids, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get_stats( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[EvaluationStats]: - """ - Get Evaluation Stats. - - Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the - corresponding Evaluator statistics (such as the mean and percentiles). - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
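To make the control/status semantics of `update_evaluation_run` concrete, a hedged sketch follows; the string forms `"completed"`/`"cancelled"` for `EvaluationStatus` are inferred from the docstring, and `add_logs_to_run` is shown as the follow-up step for non-orchestrated Runs:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Make this Run the control that all other Runs' stats are compared against.
client.evaluations.update_evaluation_run(
    id="ev_567yza", run_id="evr_890bcd", control=True,
)

# For a non-orchestrated Run, attach Logs you generated yourself...
client.evaluations.add_logs_to_run(
    id="ev_567yza", run_id="evr_890bcd", log_ids=["log_123abc"],
)

# ...then mark the Run as completed once external/human Evaluators are done.
client.evaluations.update_evaluation_run(
    id="ev_567yza", run_id="evr_890bcd", status="completed",
)
```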
- - Returns - ------- - HttpResponse[EvaluationStats] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/stats", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationStats, - construct_type( - type_=EvaluationStats, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get_logs( - self, - id: str, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PaginatedDataEvaluationLogResponse]: - """ - Get the Logs associated to a specific Evaluation. - - This returns the Logs associated to all Runs within the Evaluation. - - Parameters - ---------- - id : str - String ID of evaluation. Starts with `ev_` or `evr_`. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Filter by Run IDs. Only Logs for the specified Runs will be returned. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[PaginatedDataEvaluationLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/logs", - method="GET", - params={ - "page": page, - "size": size, - "run_id": run_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PaginatedDataEvaluationLogResponse, - construct_type( - type_=PaginatedDataEvaluationLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawEvaluationsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[EvaluationResponse]: - """ - Retrieve a list of Evaluations for the specified File. - - Parameters - ---------- - file_id : str - Filter by File ID.
Only Evaluations for the specified File will be returned. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluations to fetch. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncPager[EvaluationResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "evaluations", - method="GET", - params={ - "file_id": file_id, - "page": page, - "size": size, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedEvaluationResponse, - construct_type( - type_=PaginatedEvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - file_id=file_id, - page=page + 1, - size=size, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def create( - self, - *, - evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - file: typing.Optional[FileRequestParams] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluationResponse]: - """ - Create an Evaluation. - - Create a new Evaluation by specifying the File to evaluate, and a name - for the Evaluation. - You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. - - Parameters - ---------- - evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] - The Evaluators used to evaluate. - - file : typing.Optional[FileRequestParams] - The File to associate with the Evaluation. This File contains the Logs you're evaluating. - - name : typing.Optional[str] - Name of the Evaluation to help identify it. Must be unique within the associated File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
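The async `list` above returns an `AsyncPager` whose `_get_next` closure re-issues the request with `page + 1`, so iterating the pager walks all pages lazily. A sketch, assuming the package exposes an `AsyncHumanloop` client mirroring the sync `Humanloop` used in the docstring examples:

```python
import asyncio

from humanloop import AsyncHumanloop  # assumed async counterpart of Humanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Each exhausted page triggers _get_next, fetching page + 1 on demand.
    evaluations = await client.evaluations.list(file_id="file_id", size=10)
    async for evaluation in evaluations:
        print(evaluation.id)


asyncio.run(main())
```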
- - Returns - ------- - AsyncHttpResponse[EvaluationResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "evaluations", - method="POST", - json={ - "file": convert_and_respect_annotation_metadata( - object_=file, annotation=FileRequestParams, direction="write" - ), - "name": name, - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def add_evaluators( - self, - id: str, - *, - evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluationResponse]: - """ - Add Evaluators to an Evaluation. - - The Evaluators will be run on the Logs generated for the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] - The Evaluators to add to this Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_evaluator( - self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[EvaluationResponse]: - """ - Remove an Evaluator from an Evaluation. 
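End-to-end, `create` plus `add_evaluators` look roughly like the sketch below. The evaluator items are members of the `...EvaluatorsItemParams` unions shown later in this patch; the key names (`version_id`, `path`) and the `file` shape are assumptions inferred from the `*Params` type names, not confirmed by this diff:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Create an Evaluation for a File with one Evaluator attached...
evaluation = client.evaluations.create(
    file={"id": "file_id"},                     # assumed FileRequestParams shape
    name="accuracy-eval",                       # must be unique within the File
    evaluators=[{"version_id": "evv_012def"}],  # assumed EvaluatorVersionIdParams shape
)

# ...and attach a further Evaluator later, addressed by path this time.
client.evaluations.add_evaluators(
    id=evaluation.id,
    evaluators=[{"path": "Shared Evaluators/Accuracy Evaluator"}],
)
```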
- - The Evaluator will no longer be run on the Logs in the Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - evaluator_version_id : str - Unique identifier for Evaluator Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[EvaluationResponse]: - """ - Get an Evaluation. - - This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, - such as its name. - - To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. - To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete an Evaluation. - - The Runs and Evaluators in the Evaluation will not be deleted. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
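Conversely, detaching an Evaluator is a single call; a sketch for the async client (client name as assumed above):

```python
from humanloop import AsyncHumanloop  # assumed async client name


async def detach_evaluator(client: AsyncHumanloop) -> None:
    # The Evaluator version stops being run on the Evaluation's Logs.
    await client.evaluations.remove_evaluator(
        id="ev_567yza", evaluator_version_id="evv_012def",
    )
```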
- - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_runs_for_evaluation( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[EvaluationRunsResponse]: - """ - List all Runs for an Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationRunsResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunsResponse, - construct_type( - type_=EvaluationRunsResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def create_run( - self, - id: str, - *, - dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, - version: typing.Optional[CreateRunRequestVersionParams] = OMIT, - orchestrated: typing.Optional[bool] = OMIT, - use_existing_logs: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluationRunResponse]: - """ - Create an Evaluation Run. - - Optionally specify the Dataset and version to be evaluated. - - Humanloop will automatically start generating Logs and running Evaluators where - `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` - and then generate and submit the required Logs via the API. - - If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, - avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` - referencing a datapoint in the specified Dataset will be associated with the Run. - - To keep updated on the progress of the Run, you can poll the Run using - the `GET /evaluations/{id}/runs` endpoint and check its status. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. 
- - dataset : typing.Optional[CreateRunRequestDatasetParams] - Dataset to use in this Run. - - version : typing.Optional[CreateRunRequestVersionParams] - Version to use in this Run. - - orchestrated : typing.Optional[bool] - Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - - use_existing_logs : typing.Optional[bool] - If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationRunResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="POST", - json={ - "dataset": convert_and_respect_annotation_metadata( - object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" - ), - "version": convert_and_respect_annotation_metadata( - object_=version, annotation=CreateRunRequestVersionParams, direction="write" - ), - "orchestrated": orchestrated, - "use_existing_logs": use_existing_logs, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def add_existing_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.Optional[typing.Any]]: - """ - Add an existing Run to the specified Evaluation. - - This is useful if you want to compare the Runs in this Evaluation with an existing Run - that exists within another Evaluation. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[typing.Optional[typing.Any]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove a Run from an Evaluation. - - The Logs and Versions used in the Run will not be deleted. - If this Run is used in any other Evaluations, it will still be available in those Evaluations. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_evaluation_run( - self, - id: str, - run_id: str, - *, - control: typing.Optional[bool] = OMIT, - status: typing.Optional[EvaluationStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluationRunResponse]: - """ - Update an Evaluation Run. - - Specify `control=true` to use this Run as the control Run for the Evaluation. - You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - control : typing.Optional[bool] - If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. - - status : typing.Optional[EvaluationStatus] - Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationRunResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="PATCH", - json={ - "control": control, - "status": status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def add_logs_to_run( - self, - id: str, - run_id: str, - *, - log_ids: typing.Sequence[str], - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluationRunResponse]: - """ - Add the specified Logs to a Run. - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - run_id : str - Unique identifier for Run. - - log_ids : typing.Sequence[str] - The IDs of the Logs to add to the Run. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluationRunResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", - method="POST", - json={ - "log_ids": log_ids, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get_stats( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[EvaluationStats]: - """ - Get Evaluation Stats. - - Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the - corresponding Evaluator statistics (such as the mean and percentiles). - - Parameters - ---------- - id : str - Unique identifier for Evaluation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
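The docstrings recommend polling `GET /evaluations/{id}/runs` and then reading stats and Logs; below is a sketch of that loop with the sync client. The response field names (`runs`, `status`) are illustrative assumptions, since the `EvaluationRunsResponse` model is not part of this diff:

```python
import time

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Poll until every Run has finished (field names assumed, see above).
while True:
    runs = client.evaluations.list_runs_for_evaluation(id="ev_567yza")
    if all(run.status in ("completed", "cancelled") for run in runs.runs):
        break
    time.sleep(5)

# Aggregate Evaluator stats per Run, then the Logs of one specific Run.
stats = client.evaluations.get_stats(id="ev_567yza")
logs = client.evaluations.get_logs(id="ev_567yza", run_id="evr_890bcd", size=50)
```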
- - Returns - ------- - AsyncHttpResponse[EvaluationStats] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/stats", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluationStats, - construct_type( - type_=EvaluationStats, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get_logs( - self, - id: str, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PaginatedDataEvaluationLogResponse]: - """ - Get the Logs associated to a specific Evaluation. - - This returns the Logs associated to all Runs within the Evaluation. - - Parameters - ---------- - id : str - String ID of evaluation. Starts with `ev_` or `evr_`. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Filter by Run IDs. Only Logs for the specified Runs will be returned. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[PaginatedDataEvaluationLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/logs", - method="GET", - params={ - "page": page, - "size": size, - "run_id": run_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PaginatedDataEvaluationLogResponse, - construct_type( - type_=PaginatedDataEvaluationLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/evaluations/requests/__init__.py b/src/humanloop/evaluations/requests/__init__.py deleted file mode 100644 index 1997f1a0..00000000 --- a/src/humanloop/evaluations/requests/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
- -# isort: skip_file - -from .add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams -from .create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams -from .create_run_request_dataset import CreateRunRequestDatasetParams -from .create_run_request_version import CreateRunRequestVersionParams - -__all__ = [ - "AddEvaluatorsRequestEvaluatorsItemParams", - "CreateEvaluationRequestEvaluatorsItemParams", - "CreateRunRequestDatasetParams", - "CreateRunRequestVersionParams", -] diff --git a/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py b/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py deleted file mode 100644 index 24da1248..00000000 --- a/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.evaluator_file_id import EvaluatorFileIdParams -from ...requests.evaluator_file_path import EvaluatorFilePathParams -from ...requests.evaluator_version_id import EvaluatorVersionIdParams - -AddEvaluatorsRequestEvaluatorsItemParams = typing.Union[ - EvaluatorVersionIdParams, EvaluatorFileIdParams, EvaluatorFilePathParams -] diff --git a/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py b/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py deleted file mode 100644 index a53624c0..00000000 --- a/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.evaluator_file_id import EvaluatorFileIdParams -from ...requests.evaluator_file_path import EvaluatorFilePathParams -from ...requests.evaluator_version_id import EvaluatorVersionIdParams - -CreateEvaluationRequestEvaluatorsItemParams = typing.Union[ - EvaluatorVersionIdParams, EvaluatorFileIdParams, EvaluatorFilePathParams -] diff --git a/src/humanloop/evaluations/requests/create_run_request_dataset.py b/src/humanloop/evaluations/requests/create_run_request_dataset.py deleted file mode 100644 index cabeb7f2..00000000 --- a/src/humanloop/evaluations/requests/create_run_request_dataset.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.file_id import FileIdParams -from ...requests.file_path import FilePathParams -from ...requests.version_id import VersionIdParams - -CreateRunRequestDatasetParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams] diff --git a/src/humanloop/evaluations/requests/create_run_request_version.py b/src/humanloop/evaluations/requests/create_run_request_version.py deleted file mode 100644 index 830ee49e..00000000 --- a/src/humanloop/evaluations/requests/create_run_request_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from ...requests.file_id import FileIdParams -from ...requests.file_path import FilePathParams -from ...requests.version_id import VersionIdParams - -CreateRunRequestVersionParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams] diff --git a/src/humanloop/evaluations/types/__init__.py b/src/humanloop/evaluations/types/__init__.py deleted file mode 100644 index 508249fb..00000000 --- a/src/humanloop/evaluations/types/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItem -from .create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItem -from .create_run_request_dataset import CreateRunRequestDataset -from .create_run_request_version import CreateRunRequestVersion - -__all__ = [ - "AddEvaluatorsRequestEvaluatorsItem", - "CreateEvaluationRequestEvaluatorsItem", - "CreateRunRequestDataset", - "CreateRunRequestVersion", -] diff --git a/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py b/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py deleted file mode 100644 index 3e4bbe23..00000000 --- a/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.evaluator_file_id import EvaluatorFileId -from ...types.evaluator_file_path import EvaluatorFilePath -from ...types.evaluator_version_id import EvaluatorVersionId - -AddEvaluatorsRequestEvaluatorsItem = typing.Union[EvaluatorVersionId, EvaluatorFileId, EvaluatorFilePath] diff --git a/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py b/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py deleted file mode 100644 index 448585eb..00000000 --- a/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.evaluator_file_id import EvaluatorFileId -from ...types.evaluator_file_path import EvaluatorFilePath -from ...types.evaluator_version_id import EvaluatorVersionId - -CreateEvaluationRequestEvaluatorsItem = typing.Union[EvaluatorVersionId, EvaluatorFileId, EvaluatorFilePath] diff --git a/src/humanloop/evaluations/types/create_run_request_dataset.py b/src/humanloop/evaluations/types/create_run_request_dataset.py deleted file mode 100644 index b915987e..00000000 --- a/src/humanloop/evaluations/types/create_run_request_dataset.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.file_id import FileId -from ...types.file_path import FilePath -from ...types.version_id import VersionId - -CreateRunRequestDataset = typing.Union[VersionId, FileId, FilePath] diff --git a/src/humanloop/evaluations/types/create_run_request_version.py b/src/humanloop/evaluations/types/create_run_request_version.py deleted file mode 100644 index 6d383dd8..00000000 --- a/src/humanloop/evaluations/types/create_run_request_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
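Every request module deleted above follows the same pattern: a `typing.Union` alias over three `TypedDict` shapes, so a Dataset or Version can be addressed by version ID, File ID, or path. A minimal reconstruction of that pattern (the field names are assumptions inferred from the `*Params` class names):

```python
import typing

from typing_extensions import TypedDict


class VersionIdParams(TypedDict):
    version_id: str  # assumed field name


class FileIdParams(TypedDict):
    file_id: str  # assumed field name


class FilePathParams(TypedDict):
    path: str  # assumed field name


# Mirrors create_run_request_dataset.py: any one shape satisfies the alias.
CreateRunRequestDatasetParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams]

dataset: CreateRunRequestDatasetParams = {"path": "datasets/golden-set"}
```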
- -import typing - -from ...types.file_id import FileId -from ...types.file_path import FilePath -from ...types.version_id import VersionId - -CreateRunRequestVersion = typing.Union[VersionId, FileId, FilePath] diff --git a/src/humanloop/evaluators/__init__.py b/src/humanloop/evaluators/__init__.py deleted file mode 100644 index 480476b3..00000000 --- a/src/humanloop/evaluators/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .types import CreateEvaluatorLogRequestJudgment, CreateEvaluatorLogRequestSpec, EvaluatorRequestSpec -from .requests import ( - CreateEvaluatorLogRequestJudgmentParams, - CreateEvaluatorLogRequestSpecParams, - EvaluatorRequestSpecParams, -) - -__all__ = [ - "CreateEvaluatorLogRequestJudgment", - "CreateEvaluatorLogRequestJudgmentParams", - "CreateEvaluatorLogRequestSpec", - "CreateEvaluatorLogRequestSpecParams", - "EvaluatorRequestSpec", - "EvaluatorRequestSpecParams", -] diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py deleted file mode 100644 index 69fff10c..00000000 --- a/src/humanloop/evaluators/client.py +++ /dev/null @@ -1,1411 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse -from ..types.evaluator_response import EvaluatorResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.list_evaluators import ListEvaluators -from ..types.log_status import LogStatus -from ..types.sort_order import SortOrder -from .raw_client import AsyncRawEvaluatorsClient, RawEvaluatorsClient -from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams -from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams -from .requests.evaluator_request_spec import EvaluatorRequestSpecParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class EvaluatorsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawEvaluatorsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawEvaluatorsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. 
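The `with_raw_response` property above swaps the parsed-model interface for the raw one; a sketch of when you would reach for it (only `.data` on `HttpResponse` is demonstrated in this diff, so anything beyond it would be an assumption):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# The raw client returns HttpResponse[EvaluatorResponse] rather than the
# parsed model alone; .data carries the same EvaluatorResponse.
raw = client.evaluators.with_raw_response.get(id="ev_890bcd")
evaluator = raw.data
```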
- - Returns - ------- - RawEvaluatorsClient - """ - return self._raw_client - - def log( - self, - *, - parent_id: str, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - create_evaluator_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, - marked_completed: typing.Optional[bool] = OMIT, - spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateEvaluatorLogResponse: - """ - Submit Evaluator judgment for an existing Log. - - Creates a new Log. The evaluated Log will be set as the parent of the created Log. - - Parameters - ---------- - parent_id : str - Identifier of the evaluated Log. The newly created Log will have this one set as parent. - - version_id : typing.Optional[str] - ID of the Evaluator version to log against. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Evaluator. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from the LLM. Only populated for LLM Evaluator Logs. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. Only populated for LLM Evaluator Logs. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. Only populated for LLM Evaluator Logs. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from.
- - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - create_evaluator_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the LLM. Only populated for LLM Evaluator Logs. - - judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] - Evaluator assessment of the Log. - - marked_completed : typing.Optional[bool] - Whether the Log has been manually marked as completed by a user. - - spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreateEvaluatorLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.log(parent_id='parent_id', ) - """ - _response = self._raw_client.log( - parent_id=parent_id, - version_id=version_id, - environment=environment, - path=path, - id=id, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - create_evaluator_log_request_environment=create_evaluator_log_request_environment, - save=save, - log_id=log_id, - output_message=output_message, - judgment=judgment, - marked_completed=marked_completed, - spec=spec, - request_options=request_options, - ) - return _response.data - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[EvaluatorResponse]: - """ - Get a list of all Evaluators. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluators to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Evaluator name. 
- - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Evaluators by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SyncPager[EvaluatorResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.evaluators.list(size=1, ) - for item in response: - yield item - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - yield page - """ - return self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - def upsert( - self, - *, - spec: EvaluatorRequestSpecParams, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Create an Evaluator or update it with a new version if it already exists. - - Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Evaluator - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - spec : EvaluatorRequestSpecParams - - path : typing.Optional[str] - Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Evaluator. - - version_name : typing.Optional[str] - Unique name for the Evaluator version. Version names must be unique for a given Evaluator. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.upsert(path='Shared Evaluators/Accuracy Evaluator', spec={'arguments_type': "target_required", 'return_type': "number", 'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n return 0.5'}, version_name='simple-evaluator', version_description='Simple evaluator that returns 0.5', ) - """ - _response = self._raw_client.upsert( - spec=spec, - path=path, - id=id, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Retrieve the Evaluator with the given ID. - - By default, the deployed version of the Evaluator is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Evaluator.
- - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : typing.Optional[str] - A specific Version ID of the Evaluator to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.get(id='ev_890bcd', ) - """ - _response = self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Evaluator with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.delete(id='ev_890bcd', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Move the Evaluator to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - path : typing.Optional[str] - Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Evaluator, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.move(id='ev_890bcd', path='new directory/new name', ) - """ - _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListEvaluators: - """ - Get a list of all the versions of an Evaluator. - - Parameters - ---------- - id : str - Unique identifier for the Evaluator. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListEvaluators - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.list_versions(id='ev_890bcd', ) - """ - _response = self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - def delete_evaluator_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. 
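The `version_id` and `environment` parameters on `get` are alternative ways to pin a version; a short sketch using the IDs from the surrounding examples:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Default: whatever version of the Evaluator is currently deployed.
    deployed = client.evaluators.get(id="ev_890bcd")

    # Pin an exact version, or resolve the version deployed to a named Environment.
    pinned = client.evaluators.get(id="ev_890bcd", version_id="evv_012def")
    staging = client.evaluators.get(id="ev_890bcd", environment="staging")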
- - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.delete_evaluator_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options) - return _response.data - - def update_evaluator_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Update the name or description of the Evaluator version. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.update_evaluator_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.update_evaluator_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluatorResponse: - """ - Deploy Evaluator to an Environment. - - Set the deployed version for the specified Environment. This Evaluator - will be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', ) - """ - _response = self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Evaluator from the Environment. - - Remove the deployed version for the specified Environment. This Evaluator - will no longer be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
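Deployment changes are easiest to sanity-check by reading back the Environment list; a sketch combining `set_deployment` with `list_environments`, reusing the example IDs above:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Deploy a specific version to staging, then confirm what each Environment serves.
    client.evaluators.set_deployment(
        id="ev_890bcd", environment_id="staging", version_id="evv_012def",
    )
    for env in client.evaluators.list_environments(id="ev_890bcd"):
        print(env)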
- - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', ) - """ - _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.list_environments(id='ev_890bcd', ) - """ - _response = self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Activate and deactivate Evaluators for monitoring the Evaluator. - - An activated Evaluator will automatically be run on all new Logs - within the Evaluator for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.evaluators.update_monitoring(id='id', ) - """ - _response = self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - -class AsyncEvaluatorsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawEvaluatorsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawEvaluatorsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. 
- - Returns - ------- - AsyncRawEvaluatorsClient - """ - return self._raw_client - - async def log( - self, - *, - parent_id: str, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - create_evaluator_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, - marked_completed: typing.Optional[bool] = OMIT, - spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateEvaluatorLogResponse: - """ - Submit Evaluator judgment for an existing Log. - - Creates a new Log. The evaluated Log will be set as the parent of the created Log. - - Parameters - ---------- - parent_id : str - Identifier of the evaluated Log. The newly created Log will have this one set as parent. - - version_id : typing.Optional[str] - ID of the Evaluator version to log against. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Evaluator. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from the LLM. Only populated for LLM Evaluator Logs. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. Only populated for LLM Evaluator Logs. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received the provider. Only populated for LLM Evaluator Logs. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. 
- - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - create_evaluator_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the LLM. Only populated for LLM Evaluator Logs. - - judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] - Evaluator assessment of the Log. - - marked_completed : typing.Optional[bool] - Whether the Log has been manually marked as completed by a user. - - spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreateEvaluatorLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.log(parent_id='parent_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.log( - parent_id=parent_id, - version_id=version_id, - environment=environment, - path=path, - id=id, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - create_evaluator_log_request_environment=create_evaluator_log_request_environment, - save=save, - log_id=log_id, - output_message=output_message, - judgment=judgment, - marked_completed=marked_completed, - spec=spec, - request_options=request_options, - ) - return _response.data - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[EvaluatorResponse]: - """ - Get a list of all Evaluators. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. 
Number of Evaluators to fetch.
-
-        name : typing.Optional[str]
-            Case-insensitive filter for Evaluator name.
-
-        user_filter : typing.Optional[str]
-            Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
-
-        sort_by : typing.Optional[FileSortBy]
-            Field to sort Evaluators by
-
-        order : typing.Optional[SortOrder]
-            Direction to sort by.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncPager[EvaluatorResponse]
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            response = await client.evaluators.list(size=1, )
-            async for item in response:
-                print(item)
-
-            # alternatively, you can paginate page-by-page
-            # (see the asyncio.gather sketch after this method for issuing calls concurrently)
-            async for page in response.iter_pages():
-                print(page)
-        asyncio.run(main())
-        """
-        return await self._raw_client.list(
-            page=page,
-            size=size,
-            name=name,
-            user_filter=user_filter,
-            sort_by=sort_by,
-            order=order,
-            request_options=request_options,
-        )
-
-    async def upsert(
-        self,
-        *,
-        spec: EvaluatorRequestSpecParams,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        version_name: typing.Optional[str] = OMIT,
-        version_description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> EvaluatorResponse:
-        """
-        Create an Evaluator or update it with a new version if it already exists.
-
-        Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
-
-        You can provide `version_name` and `version_description` to identify and describe your versions.
-        Version names must be unique within an Evaluator - attempting to create a version with a name
-        that already exists will result in a 409 Conflict error.
-
-        Parameters
-        ----------
-        spec : EvaluatorRequestSpecParams
-
-        path : typing.Optional[str]
-            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Evaluator.
-
-        version_name : typing.Optional[str]
-            Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
-
-        version_description : typing.Optional[str]
-            Description of the version, e.g., the changes made in this version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
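As referenced in the pagination example above, the async client also allows several requests in flight at once; a sketch using `asyncio.gather`, with placeholder Evaluator IDs:

    import asyncio

    from humanloop import AsyncHumanloop

    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    async def main() -> None:
        # Fetch several Evaluators concurrently; the IDs are placeholders.
        evaluators = await asyncio.gather(
            client.evaluators.get(id="ev_890bcd"),
            client.evaluators.get(id="ev_123abc"),
        )
        for evaluator in evaluators:
            print(evaluator)

    asyncio.run(main())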
- - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.upsert(path='Shared Evaluators/Accuracy Evaluator', spec={'arguments_type': "target_required", 'return_type': "number", 'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n return 0.5'}, version_name='simple-evaluator', version_description='Simple evaluator that returns 0.5', ) - asyncio.run(main()) - """ - _response = await self._raw_client.upsert( - spec=spec, - path=path, - id=id, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Retrieve the Evaluator with the given ID. - - By default, the deployed version of the Evaluator is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : typing.Optional[str] - A specific Version ID of the Evaluator to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.get(id='ev_890bcd', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Evaluator with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.delete(id='ev_890bcd', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Move the Evaluator to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - path : typing.Optional[str] - Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Evaluator, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.move(id='ev_890bcd', path='new directory/new name', ) - asyncio.run(main()) - """ - _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListEvaluators: - """ - Get a list of all the versions of an Evaluator. - - Parameters - ---------- - id : str - Unique identifier for the Evaluator. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListEvaluators - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.list_versions(id='ev_890bcd', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - async def delete_evaluator_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.delete_evaluator_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options) - return _response.data - - async def update_evaluator_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Update the name or description of the Evaluator version. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.update_evaluator_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_evaluator_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluatorResponse: - """ - Deploy Evaluator to an Environment. - - Set the deployed version for the specified Environment. This Evaluator - will be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', ) - asyncio.run(main()) - """ - _response = await self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Evaluator from the Environment. - - Remove the deployed version for the specified Environment. This Evaluator - will no longer be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.list_environments(id='ev_890bcd', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> EvaluatorResponse: - """ - Activate and deactivate Evaluators for monitoring the Evaluator. - - An activated Evaluator will automatically be run on all new Logs - within the Evaluator for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - EvaluatorResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.evaluators.update_monitoring(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data diff --git a/src/humanloop/evaluators/raw_client.py b/src/humanloop/evaluators/raw_client.py deleted file mode 100644 index 8aeb32bc..00000000 --- a/src/humanloop/evaluators/raw_client.py +++ /dev/null @@ -1,2014 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import datetime as dt -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse -from ..types.evaluator_response import EvaluatorResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.http_validation_error import HttpValidationError -from ..types.list_evaluators import ListEvaluators -from ..types.log_status import LogStatus -from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse -from ..types.sort_order import SortOrder -from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams -from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams -from .requests.evaluator_request_spec import EvaluatorRequestSpecParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
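The `OMIT` sentinel above uses `Ellipsis` so the request builder can tell 'argument not passed' apart from an explicit `None`, which must survive into the JSON body. A standalone illustration of the pattern (not the SDK's own code):

    import typing

    # Ellipsis as a sentinel: distinguishes "not passed" from an explicit None.
    OMIT = typing.cast(typing.Any, ...)

    def build_body(**kwargs: typing.Any) -> dict:
        # Drop keys the caller never supplied, but keep explicit None values.
        return {k: v for k, v in kwargs.items() if v is not OMIT}

    print(build_body(path="folder/name", id=OMIT, version_name=None))
    # {'path': 'folder/name', 'version_name': None}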
-
-
-class RawEvaluatorsClient:
-    def __init__(self, *, client_wrapper: SyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
-    def log(
-        self,
-        *,
-        parent_id: str,
-        version_id: typing.Optional[str] = None,
-        environment: typing.Optional[str] = None,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        start_time: typing.Optional[dt.datetime] = OMIT,
-        end_time: typing.Optional[dt.datetime] = OMIT,
-        output: typing.Optional[str] = OMIT,
-        created_at: typing.Optional[dt.datetime] = OMIT,
-        error: typing.Optional[str] = OMIT,
-        provider_latency: typing.Optional[float] = OMIT,
-        stdout: typing.Optional[str] = OMIT,
-        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        source: typing.Optional[str] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        log_status: typing.Optional[LogStatus] = OMIT,
-        source_datapoint_id: typing.Optional[str] = OMIT,
-        trace_parent_id: typing.Optional[str] = OMIT,
-        user: typing.Optional[str] = OMIT,
-        create_evaluator_log_request_environment: typing.Optional[str] = OMIT,
-        save: typing.Optional[bool] = OMIT,
-        log_id: typing.Optional[str] = OMIT,
-        output_message: typing.Optional[ChatMessageParams] = OMIT,
-        judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT,
-        marked_completed: typing.Optional[bool] = OMIT,
-        spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> HttpResponse[CreateEvaluatorLogResponse]:
-        """
-        Submit Evaluator judgment for an existing Log.
-
-        Creates a new Log. The evaluated Log will be set as the parent of the created Log.
-
-        Parameters
-        ----------
-        parent_id : str
-            Identifier of the evaluated Log. The newly created Log will have this one set as parent.
-
-        version_id : typing.Optional[str]
-            ID of the Evaluator version to log against.
-
-        environment : typing.Optional[str]
-            Name of the Environment identifying a deployed version to log to.
-
-        path : typing.Optional[str]
-            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Evaluator.
-
-        start_time : typing.Optional[dt.datetime]
-            When the logged event started.
-
-        end_time : typing.Optional[dt.datetime]
-            When the logged event ended.
-
-        output : typing.Optional[str]
-            Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
-        created_at : typing.Optional[dt.datetime]
-            User defined timestamp for when the log was created.
-
-        error : typing.Optional[str]
-            Error message if the log is an error.
-
-        provider_latency : typing.Optional[float]
-            Duration of the logged event in seconds.
-
-        stdout : typing.Optional[str]
-            Captured log and debug statements.
-
-        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw request sent to provider. Only populated for LLM Evaluator Logs.
-
-        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw response received from the provider. Only populated for LLM Evaluator Logs.
-
-        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            The inputs passed to the prompt template.
- - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - create_evaluator_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the LLM. Only populated for LLM Evaluator Logs. - - judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] - Evaluator assessment of the Log. - - marked_completed : typing.Optional[bool] - Whether the Log has been manually marked as completed by a user. - - spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[CreateEvaluatorLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "evaluators/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "parent_id": parent_id, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": create_evaluator_log_request_environment, - "save": save, - "log_id": log_id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "judgment": convert_and_respect_annotation_metadata( - object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" - ), - "marked_completed": marked_completed, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreateEvaluatorLogResponse, - construct_type( - type_=CreateEvaluatorLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[EvaluatorResponse]: - """ - Get a list of all Evaluators. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluators to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Evaluator name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Evaluators by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SyncPager[EvaluatorResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "evaluators", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataEvaluatorResponse, - construct_type( - type_=PaginatedDataEvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upsert( - self, - *, - spec: EvaluatorRequestSpecParams, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluatorResponse]: - """ - Create an Evaluator or update it with a new version if it already exists. - - Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Evaluator - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - spec : EvaluatorRequestSpecParams - - path : typing.Optional[str] - Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Evaluator. - - version_name : typing.Optional[str] - Unique name for the Evaluator version. Version names must be unique for a given Evaluator. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[EvaluatorResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "evaluators", - method="POST", - json={ - "path": path, - "id": id, - "version_name": version_name, - "version_description": version_description, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluatorResponse]: - """ - Retrieve the Evaluator with the given ID. - - By default, the deployed version of the Evaluator is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : typing.Optional[str] - A specific Version ID of the Evaluator to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluatorResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Evaluator with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluatorResponse]: - """ - Move the Evaluator to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - path : typing.Optional[str] - Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Evaluator, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluatorResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ListEvaluators]: - """ - Get a list of all the versions of an Evaluator. - - Parameters - ---------- - id : str - Unique identifier for the Evaluator. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[ListEvaluators] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListEvaluators, - construct_type( - type_=ListEvaluators, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_evaluator_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Delete a version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_evaluator_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluatorResponse]: - """ - Update the name or description of the Evaluator version. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[EvaluatorResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[EvaluatorResponse]: - """ - Deploy Evaluator to an Environment. - - Set the deployed version for the specified Environment. This Evaluator - will be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[EvaluatorResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove deployed Evaluator from the Environment. - - Remove the deployed version for the specified Environment. This Evaluator - will no longer be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[EvaluatorResponse]: - """ - Activate and deactivate Evaluators for monitoring the Evaluator. - - An activated Evaluator will automatically be run on all new Logs - within the Evaluator for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[EvaluatorResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawEvaluatorsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def log( - self, - *, - parent_id: str, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - create_evaluator_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, - marked_completed: typing.Optional[bool] = OMIT, - spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[CreateEvaluatorLogResponse]: - """ - Submit Evaluator judgment for an existing Log. - - Creates a new Log. The evaluated Log will be set as the parent of the created Log. - - Parameters - ---------- - parent_id : str - Identifier of the evaluated Log. The newly created Log will have this one set as parent. 
- - version_id : typing.Optional[str] - ID of the Evaluator version to log against. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Evaluator. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from the LLM. Only populated for LLM Evaluator Logs. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. Only populated for LLM Evaluator Logs. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. Only populated for LLM Evaluator Logs. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - create_evaluator_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the LLM. Only populated for LLM Evaluator Logs. - - judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] - Evaluator assessment of the Log. - - marked_completed : typing.Optional[bool] - Whether the Log has been manually marked as completed by a user. - - spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncHttpResponse[CreateEvaluatorLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "evaluators/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "parent_id": parent_id, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": create_evaluator_log_request_environment, - "save": save, - "log_id": log_id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "judgment": convert_and_respect_annotation_metadata( - object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" - ), - "marked_completed": marked_completed, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreateEvaluatorLogResponse, - construct_type( - type_=CreateEvaluatorLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[EvaluatorResponse]: - """ - Get a list of all Evaluators. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Evaluators to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Evaluator name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Evaluators by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncPager[EvaluatorResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "evaluators", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataEvaluatorResponse, - construct_type( - type_=PaginatedDataEvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def upsert( - self, - *, - spec: EvaluatorRequestSpecParams, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluatorResponse]: - """ - Create an Evaluator or update it with a new version if it already exists. - - Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within an Evaluator - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - spec : EvaluatorRequestSpecParams - - path : typing.Optional[str] - Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Evaluator. - - version_name : typing.Optional[str] - Unique name for the Evaluator version. Version names must be unique for a given Evaluator. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncHttpResponse[EvaluatorResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "evaluators", - method="POST", - json={ - "path": path, - "id": id, - "version_name": version_name, - "version_description": version_description, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluatorResponse]: - """ - Retrieve the Evaluator with the given ID. - - By default, the deployed version of the Evaluator is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : typing.Optional[str] - A specific Version ID of the Evaluator to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluatorResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Evaluator with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluatorResponse]: - """ - Move the Evaluator to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - path : typing.Optional[str] - Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Evaluator, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluatorResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ListEvaluators]: - """ - Get a list of all the versions of an Evaluator. - - Parameters - ---------- - id : str - Unique identifier for the Evaluator. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[ListEvaluators] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListEvaluators, - construct_type( - type_=ListEvaluators, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_evaluator_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete a version of the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_evaluator_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluatorResponse]: - """ - Update the name or description of the Evaluator version. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[EvaluatorResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[EvaluatorResponse]: - """ - Deploy Evaluator to an Environment. - - Set the deployed version for the specified Environment. This Evaluator - will be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluatorResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove deployed Evaluator from the Environment. - - Remove the deployed version for the specified Environment. This Evaluator - will no longer be used for calls made to the Evaluator in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. 
- - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Evaluator. - - Parameters - ---------- - id : str - Unique identifier for Evaluator. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[EvaluatorResponse]: - """ - Activate and deactivate Evaluators for monitoring the Evaluator. - - An activated Evaluator will automatically be run on all new Logs - within the Evaluator for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. 
These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[EvaluatorResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/evaluators/requests/__init__.py b/src/humanloop/evaluators/requests/__init__.py deleted file mode 100644 index 6a00390a..00000000 --- a/src/humanloop/evaluators/requests/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams -from .create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams -from .evaluator_request_spec import EvaluatorRequestSpecParams - -__all__ = [ - "CreateEvaluatorLogRequestJudgmentParams", - "CreateEvaluatorLogRequestSpecParams", - "EvaluatorRequestSpecParams", -] diff --git a/src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py b/src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py deleted file mode 100644 index 680abe1f..00000000 --- a/src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateEvaluatorLogRequestJudgmentParams = typing.Union[bool, str, typing.Sequence[str], float] diff --git a/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py b/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py deleted file mode 100644 index 0e6539ed..00000000 --- a/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from ...requests.code_evaluator_request import CodeEvaluatorRequestParams -from ...requests.external_evaluator_request import ExternalEvaluatorRequestParams -from ...requests.human_evaluator_request import HumanEvaluatorRequestParams -from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams - -CreateEvaluatorLogRequestSpecParams = typing.Union[ - LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams -] diff --git a/src/humanloop/evaluators/requests/evaluator_request_spec.py b/src/humanloop/evaluators/requests/evaluator_request_spec.py deleted file mode 100644 index 7bd0d395..00000000 --- a/src/humanloop/evaluators/requests/evaluator_request_spec.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.code_evaluator_request import CodeEvaluatorRequestParams -from ...requests.external_evaluator_request import ExternalEvaluatorRequestParams -from ...requests.human_evaluator_request import HumanEvaluatorRequestParams -from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams - -EvaluatorRequestSpecParams = typing.Union[ - LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams -] diff --git a/src/humanloop/evaluators/types/__init__.py b/src/humanloop/evaluators/types/__init__.py deleted file mode 100644 index 09e95d81..00000000 --- a/src/humanloop/evaluators/types/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgment -from .create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpec -from .evaluator_request_spec import EvaluatorRequestSpec - -__all__ = ["CreateEvaluatorLogRequestJudgment", "CreateEvaluatorLogRequestSpec", "EvaluatorRequestSpec"] diff --git a/src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py b/src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py deleted file mode 100644 index d3098335..00000000 --- a/src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateEvaluatorLogRequestJudgment = typing.Union[bool, str, typing.List[str], float] diff --git a/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py b/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py deleted file mode 100644 index 0f22560c..00000000 --- a/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from ...types.code_evaluator_request import CodeEvaluatorRequest -from ...types.external_evaluator_request import ExternalEvaluatorRequest -from ...types.human_evaluator_request import HumanEvaluatorRequest -from ...types.llm_evaluator_request import LlmEvaluatorRequest - -CreateEvaluatorLogRequestSpec = typing.Union[ - LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest -] diff --git a/src/humanloop/evaluators/types/evaluator_request_spec.py b/src/humanloop/evaluators/types/evaluator_request_spec.py deleted file mode 100644 index 3f31af3f..00000000 --- a/src/humanloop/evaluators/types/evaluator_request_spec.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.code_evaluator_request import CodeEvaluatorRequest -from ...types.external_evaluator_request import ExternalEvaluatorRequest -from ...types.human_evaluator_request import HumanEvaluatorRequest -from ...types.llm_evaluator_request import LlmEvaluatorRequest - -EvaluatorRequestSpec = typing.Union[ - LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest -] diff --git a/src/humanloop/files/__init__.py b/src/humanloop/files/__init__.py deleted file mode 100644 index 7b3a69b5..00000000 --- a/src/humanloop/files/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .types import RetrieveByPathFilesRetrieveByPathPostResponse -from .requests import RetrieveByPathFilesRetrieveByPathPostResponseParams - -__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponse", "RetrieveByPathFilesRetrieveByPathPostResponseParams"] diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py deleted file mode 100644 index 407ba0e9..00000000 --- a/src/humanloop/files/client.py +++ /dev/null @@ -1,301 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.request_options import RequestOptions -from ..types.file_sort_by import FileSortBy -from ..types.file_type import FileType -from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, -) -from ..types.sort_order import SortOrder -from .raw_client import AsyncRawFilesClient, RawFilesClient -from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class FilesClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawFilesClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawFilesClient: - """ - Retrieves a raw implementation of this client that returns raw responses. 
- - Returns - ------- - RawFilesClient - """ - return self._raw_client - - def list_files( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - path: typing.Optional[str] = None, - template: typing.Optional[bool] = None, - type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, - environment: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: - """ - Get a paginated list of files. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of files to fetch. - - name : typing.Optional[str] - Case-insensitive filter for file name. - - path : typing.Optional[str] - Path of the directory to filter for. Returns files in this directory and all its subdirectories. - - template : typing.Optional[bool] - Filter to include only template files. - - type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] - List of file types to filter for. - - environment : typing.Optional[str] - Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. - - sort_by : typing.Optional[FileSortBy] - Field to sort files by - - order : typing.Optional[SortOrder] - Direction to sort by. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.files.list_files() - """ - _response = self._raw_client.list_files( - page=page, - size=size, - name=name, - path=path, - template=template, - type=type, - environment=environment, - sort_by=sort_by, - order=order, - include_raw_file_content=include_raw_file_content, - request_options=request_options, - ) - return _response.data - - def retrieve_by_path( - self, - *, - path: str, - environment: typing.Optional[str] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> RetrieveByPathFilesRetrieveByPathPostResponse: - """ - Retrieve a File by path. - - Parameters - ---------- - path : str - Path of the File to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - RetrieveByPathFilesRetrieveByPathPostResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.files.retrieve_by_path(path='path', ) - """ - _response = self._raw_client.retrieve_by_path( - path=path, - environment=environment, - include_raw_file_content=include_raw_file_content, - request_options=request_options, - ) - return _response.data - - -class AsyncFilesClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawFilesClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawFilesClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawFilesClient - """ - return self._raw_client - - async def list_files( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - path: typing.Optional[str] = None, - template: typing.Optional[bool] = None, - type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, - environment: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: - """ - Get a paginated list of files. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of files to fetch. - - name : typing.Optional[str] - Case-insensitive filter for file name. - - path : typing.Optional[str] - Path of the directory to filter for. Returns files in this directory and all its subdirectories. - - template : typing.Optional[bool] - Filter to include only template files. - - type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] - List of file types to filter for. - - environment : typing.Optional[str] - Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. - - sort_by : typing.Optional[FileSortBy] - Field to sort files by - - order : typing.Optional[SortOrder] - Direction to sort by. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.files.list_files() - asyncio.run(main()) - """ - _response = await self._raw_client.list_files( - page=page, - size=size, - name=name, - path=path, - template=template, - type=type, - environment=environment, - sort_by=sort_by, - order=order, - include_raw_file_content=include_raw_file_content, - request_options=request_options, - ) - return _response.data - - async def retrieve_by_path( - self, - *, - path: str, - environment: typing.Optional[str] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> RetrieveByPathFilesRetrieveByPathPostResponse: - """ - Retrieve a File by path. - - Parameters - ---------- - path : str - Path of the File to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - RetrieveByPathFilesRetrieveByPathPostResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.files.retrieve_by_path(path='path', ) - asyncio.run(main()) - """ - _response = await self._raw_client.retrieve_by_path( - path=path, - environment=environment, - include_raw_file_content=include_raw_file_content, - request_options=request_options, - ) - return _response.data diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py deleted file mode 100644 index 2f5f2d05..00000000 --- a/src/humanloop/files/raw_client.py +++ /dev/null @@ -1,382 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.request_options import RequestOptions -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.file_sort_by import FileSortBy -from ..types.file_type import FileType -from ..types.http_validation_error import HttpValidationError -from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, -) -from ..types.sort_order import SortOrder -from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
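A minimal usage sketch of the two Files client layers defined above, assuming a configured `Humanloop` client as in the docstring examples; the `api_key` value is a placeholder, and the `records` attribute on the paginated response is assumed by analogy with the Evaluators pager:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # The wrapped client returns the parsed body directly.
    page = client.files.list_files(size=10)

    # The raw client (via with_raw_response) returns an HttpResponse whose
    # .data holds the same parsed body, keeping the underlying response around.
    raw = client.files.with_raw_response.list_files(size=10)
    assert raw.data.records == page.records  # records assumed, as noted above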
- - -class RawFilesClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def list_files( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - path: typing.Optional[str] = None, - template: typing.Optional[bool] = None, - type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, - environment: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse - ]: - """ - Get a paginated list of files. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of files to fetch. - - name : typing.Optional[str] - Case-insensitive filter for file name. - - path : typing.Optional[str] - Path of the directory to filter for. Returns files in this directory and all its subdirectories. - - template : typing.Optional[bool] - Filter to include only template files. - - type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] - List of file types to filter for. - - environment : typing.Optional[str] - Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. - - sort_by : typing.Optional[FileSortBy] - Field to sort files by - - order : typing.Optional[SortOrder] - Direction to sort by. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "files", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "path": path, - "template": template, - "type": type, - "environment": environment, - "sort_by": sort_by, - "order": order, - "include_raw_file_content": include_raw_file_content, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, - construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def retrieve_by_path( - self, - *, - path: str, - environment: typing.Optional[str] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]: - """ - Retrieve a File by path. - - Parameters - ---------- - path : str - Path of the File to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "files/retrieve-by-path", - method="POST", - params={ - "environment": environment, - "include_raw_file_content": include_raw_file_content, - }, - json={ - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - RetrieveByPathFilesRetrieveByPathPostResponse, - construct_type( - type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawFilesClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def list_files( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - path: typing.Optional[str] = None, - template: typing.Optional[bool] = None, - type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, - environment: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse - ]: - """ - Get a paginated list of files. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of files to fetch. - - name : typing.Optional[str] - Case-insensitive filter for file name. - - path : typing.Optional[str] - Path of the directory to filter for. Returns files in this directory and all its subdirectories. - - template : typing.Optional[bool] - Filter to include only template files. - - type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] - List of file types to filter for. - - environment : typing.Optional[str] - Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. - - sort_by : typing.Optional[FileSortBy] - Field to sort files by - - order : typing.Optional[SortOrder] - Direction to sort by. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "files", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "path": path, - "template": template, - "type": type, - "environment": environment, - "sort_by": sort_by, - "order": order, - "include_raw_file_content": include_raw_file_content, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, - construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def retrieve_by_path( - self, - *, - path: str, - environment: typing.Optional[str] = None, - include_raw_file_content: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]: - """ - Retrieve a File by path. - - Parameters - ---------- - path : str - Path of the File to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - include_raw_file_content : typing.Optional[bool] - Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "files/retrieve-by-path", - method="POST", - params={ - "environment": environment, - "include_raw_file_content": include_raw_file_content, - }, - json={ - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - RetrieveByPathFilesRetrieveByPathPostResponse, - construct_type( - type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/files/requests/__init__.py b/src/humanloop/files/requests/__init__.py deleted file mode 100644 index c4ae6bb0..00000000 --- a/src/humanloop/files/requests/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponseParams - -__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponseParams"] diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py deleted file mode 100644 index 20c1bef0..00000000 --- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.agent_response import AgentResponseParams -from ...requests.dataset_response import DatasetResponseParams -from ...requests.evaluator_response import EvaluatorResponseParams -from ...requests.flow_response import FlowResponseParams -from ...requests.prompt_response import PromptResponseParams -from ...requests.tool_response import ToolResponseParams - -RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[ - PromptResponseParams, - ToolResponseParams, - DatasetResponseParams, - EvaluatorResponseParams, - FlowResponseParams, - AgentResponseParams, -] diff --git a/src/humanloop/files/types/__init__.py b/src/humanloop/files/types/__init__.py deleted file mode 100644 index c34673a3..00000000 --- a/src/humanloop/files/types/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
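-# The retrieve-by-path response is a union over every File type Humanloop can
-# return. A minimal, illustrative sketch of narrowing it at the call site,
-# assuming a configured client, an existing path, and that the wrapped files
-# client mirrors the raw signature (not part of the generated SDK):
-#
-#     from humanloop import Humanloop
-#     from humanloop.types.prompt_response import PromptResponse
-#
-#     client = Humanloop(api_key="YOUR_API_KEY")
-#     file = client.files.retrieve_by_path(path="folder/name")
-#     if isinstance(file, PromptResponse):
-#         print(file.path)  # assumes each union member is a File carrying `id` and `path`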
- -# isort: skip_file - -from .retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse - -__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponse"] diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py deleted file mode 100644 index c3dd6cb7..00000000 --- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.agent_response import AgentResponse -from ...types.dataset_response import DatasetResponse -from ...types.evaluator_response import EvaluatorResponse -from ...types.flow_response import FlowResponse -from ...types.prompt_response import PromptResponse -from ...types.tool_response import ToolResponse - -RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse -] diff --git a/src/humanloop/flows/__init__.py b/src/humanloop/flows/__init__.py deleted file mode 100644 index 5cde0202..00000000 --- a/src/humanloop/flows/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py deleted file mode 100644 index 8fae2360..00000000 --- a/src/humanloop/flows/client.py +++ /dev/null @@ -1,1583 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.flow_kernel_request import FlowKernelRequestParams -from ..types.create_flow_log_response import CreateFlowLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.flow_log_response import FlowLogResponse -from ..types.flow_response import FlowResponse -from ..types.list_flows import ListFlows -from ..types.log_status import LogStatus -from ..types.sort_order import SortOrder -from .raw_client import AsyncRawFlowsClient, RawFlowsClient - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class FlowsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawFlowsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawFlowsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. 
-
-        Returns
-        -------
-        RawFlowsClient
-        """
-        return self._raw_client
-
-    def log(
-        self,
-        *,
-        version_id: typing.Optional[str] = None,
-        environment: typing.Optional[str] = None,
-        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
-        output_message: typing.Optional[ChatMessageParams] = OMIT,
-        run_id: typing.Optional[str] = OMIT,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        start_time: typing.Optional[dt.datetime] = OMIT,
-        end_time: typing.Optional[dt.datetime] = OMIT,
-        output: typing.Optional[str] = OMIT,
-        created_at: typing.Optional[dt.datetime] = OMIT,
-        error: typing.Optional[str] = OMIT,
-        provider_latency: typing.Optional[float] = OMIT,
-        stdout: typing.Optional[str] = OMIT,
-        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        source: typing.Optional[str] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        log_status: typing.Optional[LogStatus] = OMIT,
-        source_datapoint_id: typing.Optional[str] = OMIT,
-        trace_parent_id: typing.Optional[str] = OMIT,
-        user: typing.Optional[str] = OMIT,
-        flow_log_request_environment: typing.Optional[str] = OMIT,
-        save: typing.Optional[bool] = OMIT,
-        log_id: typing.Optional[str] = OMIT,
-        flow: typing.Optional[FlowKernelRequestParams] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> CreateFlowLogResponse:
-        """
-        Log to a Flow.
-
-        You can use query parameters `version_id`, or `environment`, to target
-        an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-        If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-        in order to trigger Evaluators.
-
-        Parameters
-        ----------
-        version_id : typing.Optional[str]
-            A specific Version ID of the Flow to log to.
-
-        environment : typing.Optional[str]
-            Name of the Environment identifying a deployed version to log to.
-
-        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
-            List of chat messages that were used as an input to the Flow.
-
-        output_message : typing.Optional[ChatMessageParams]
-            The output message returned by this Flow.
-
-        run_id : typing.Optional[str]
-            Unique identifier for the Run to associate the Log to.
-
-        path : typing.Optional[str]
-            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Flow.
-
-        start_time : typing.Optional[dt.datetime]
-            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
-
-        end_time : typing.Optional[dt.datetime]
-            The end time of the Trace. Will be updated if a child Log with a later end time is added.
-
-        output : typing.Optional[str]
-            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
-        created_at : typing.Optional[dt.datetime]
-            User-defined timestamp for when the log was created.
-
-        error : typing.Optional[str]
-            Error message if the log is an error.
-
-        provider_latency : typing.Optional[float]
-            Duration of the logged event in seconds.
-
-        stdout : typing.Optional[str]
-            Captured log and debug statements.
-
-        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw request sent to the provider.
-
-        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw response received from the provider.
-
-        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            The inputs passed to the Flow.
-
-        source : typing.Optional[str]
-            Identifies where the model was called from.
-
-        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Any additional metadata to record.
-
-        log_status : typing.Optional[LogStatus]
-            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
-
-        source_datapoint_id : typing.Optional[str]
-            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-        trace_parent_id : typing.Optional[str]
-            The ID of the parent Log to nest this Log under in a Trace.
-
-        user : typing.Optional[str]
-            End-user ID related to the Log.
-
-        flow_log_request_environment : typing.Optional[str]
-            The name of the Environment the Log is associated with.
-
-        save : typing.Optional[bool]
-            Whether the request/response payloads will be stored on Humanloop.
-
-        log_id : typing.Optional[str]
-            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-        flow : typing.Optional[FlowKernelRequestParams]
-            Flow used to generate the Trace.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        CreateFlowLogResponse
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        import datetime
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8}
-        , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
-        }}, inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'
-        }, output='The patient is likely experiencing a myocardial infarction.
Immediate medical attention is required.', log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat("2024-07-08 21:40:39+00:00", ), ) - """ - _response = self._raw_client.log( - version_id=version_id, - environment=environment, - messages=messages, - output_message=output_message, - run_id=run_id, - path=path, - id=id, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - flow_log_request_environment=flow_log_request_environment, - save=save, - log_id=log_id, - flow=flow, - request_options=request_options, - ) - return _response.data - - def update_log( - self, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowLogResponse: - """ - Update the status, inputs, output of a Flow Log. - - Marking a Flow Log as complete will trigger any monitoring Evaluators to run. - Inputs and output (or error) must be provided in order to mark it as complete. - - The end_time log attribute will be set to match the time the log is marked as complete. - - Parameters - ---------- - log_id : str - Unique identifier of the Flow Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Flow. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Flow. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Flow Log. - - output : typing.Optional[str] - The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.' - }, output='The patient is likely experiencing a myocardial infarction. 
Immediate medical attention is required.', log_status="complete", ) - """ - _response = self._raw_client.update_log( - log_id, - messages=messages, - output_message=output_message, - inputs=inputs, - output=output, - error=error, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowResponse: - """ - Retrieve the Flow with the given ID. - - By default, the deployed version of the Flow is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : typing.Optional[str] - A specific Version ID of the Flow to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', ) - """ - _response = self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Flow with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowResponse: - """ - Move the Flow to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - path : typing.Optional[str] - Path of the Flow including the Flow name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Flow. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move Flow to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
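-        # A minimal, illustrative sketch of the trace lifecycle that `log` and
-        # `update_log` describe above. Assumes a configured client, that the create
-        # response exposes `id`, and uses placeholder ids and sample text:
-        #
-        #     log = client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', log_status="incomplete")
-        #     # ... add child Logs with trace_parent_id=log.id while the trace is open ...
-        #     client.flows.update_log(log_id=log.id, output='All done.', log_status="complete")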
-
-        Returns
-        -------
-        FlowResponse
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
-        """
-        _response = self._raw_client.move(
-            id, path=path, name=name, directory_id=directory_id, request_options=request_options
-        )
-        return _response.data
-
-    def list(
-        self,
-        *,
-        page: typing.Optional[int] = None,
-        size: typing.Optional[int] = None,
-        name: typing.Optional[str] = None,
-        user_filter: typing.Optional[str] = None,
-        sort_by: typing.Optional[FileSortBy] = None,
-        order: typing.Optional[SortOrder] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> SyncPager[FlowResponse]:
-        """
-        Get a list of Flows.
-
-        Parameters
-        ----------
-        page : typing.Optional[int]
-            Page number for pagination.
-
-        size : typing.Optional[int]
-            Page size for pagination. Number of Flows to fetch.
-
-        name : typing.Optional[str]
-            Case-insensitive filter for Flow name.
-
-        user_filter : typing.Optional[str]
-            Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
-        sort_by : typing.Optional[FileSortBy]
-            Field to sort Flows by
-
-        order : typing.Optional[SortOrder]
-            Direction to sort by.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        SyncPager[FlowResponse]
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        response = client.flows.list(size=1, )
-        for item in response:
-            print(item)
-        # alternatively, you can paginate page-by-page
-        for page in response.iter_pages():
-            print(page)
-        """
-        return self._raw_client.list(
-            page=page,
-            size=size,
-            name=name,
-            user_filter=user_filter,
-            sort_by=sort_by,
-            order=order,
-            request_options=request_options,
-        )
-
-    def upsert(
-        self,
-        *,
-        attributes: typing.Dict[str, typing.Optional[typing.Any]],
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        version_name: typing.Optional[str] = OMIT,
-        version_description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> FlowResponse:
-        """
-        Create or update a Flow.
-
-        Flows can be identified by their `ID` or `path`.
-
-        You can provide `version_name` and `version_description` to identify and describe your versions.
-        Version names must be unique within a Flow - attempting to create a version with a name
-        that already exists will result in a 409 Conflict error.
-
-        Parameters
-        ----------
-        attributes : typing.Dict[str, typing.Optional[typing.Any]]
-            A key-value object identifying the Flow Version.
-
-        path : typing.Optional[str]
-            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Flow.
-
-        version_name : typing.Optional[str]
-            Unique name for the Flow version. Version names must be unique for a given Flow.
-
-        version_description : typing.Optional[str]
-            Description of the version, e.g., the changes made in this version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        FlowResponse
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        client.flows.upsert(path='Personal Projects/MedQA Flow', attributes={'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8}
-        , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
-        }, version_name='medqa-flow-v1', version_description='Initial version', )
-        """
-        _response = self._raw_client.upsert(
-            attributes=attributes,
-            path=path,
-            id=id,
-            version_name=version_name,
-            version_description=version_description,
-            request_options=request_options,
-        )
-        return _response.data
-
-    def list_versions(
-        self,
-        id: str,
-        *,
-        evaluator_aggregates: typing.Optional[bool] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> ListFlows:
-        """
-        Get a list of all the versions of a Flow.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Flow.
-
-        evaluator_aggregates : typing.Optional[bool]
-            Whether to include Evaluator aggregate results for the versions in the response.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        ListFlows
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
-        """
-        _response = self._raw_client.list_versions(
-            id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
-        )
-        return _response.data
-
-    def delete_flow_version(
-        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
-    ) -> None:
-        """
-        Delete a version of the Flow.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Flow.
-
-        version_id : str
-            Unique identifier for the specific version of the Flow.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        from humanloop import Humanloop
-        client = Humanloop(api_key="YOUR_API_KEY", )
-        client.flows.delete_flow_version(id='id', version_id='version_id', )
-        """
-        _response = self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
-        return _response.data
-
-    def update_flow_version(
-        self,
-        id: str,
-        version_id: str,
-        *,
-        name: typing.Optional[str] = OMIT,
-        description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> FlowResponse:
-        """
-        Update the name or description of the Flow version.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Flow.
-
-        version_id : str
-            Unique identifier for the specific version of the Flow.
-
-        name : typing.Optional[str]
-            Name of the version.
-
-        description : typing.Optional[str]
-            Description of the version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
- - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.update_flow_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.update_flow_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FlowResponse: - """ - Deploy Flow to an Environment. - - Set the deployed version for the specified Environment. This Flow - will be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', ) - """ - _response = self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Flow from the Environment. - - Remove the deployed version for the specified Environment. This Flow - will no longer be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', ) - """ - _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
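-        # An illustrative sketch tying the deployment endpoints above together;
-        # ids are the placeholder values from the docstring examples, and the
-        # `name` attribute on FileEnvironmentResponse is an assumption:
-        #
-        #     client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O')
-        #     for env in client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O'):
-        #         print(env.name)
-        #     client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging')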
- - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', ) - """ - _response = self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowResponse: - """ - Activate and deactivate Evaluators for monitoring the Flow. - - An activated Evaluator will automatically be run on all new "completed" Logs - within the Flow for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) - """ - _response = self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - -class AsyncFlowsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawFlowsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawFlowsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. 
-
-        Returns
-        -------
-        AsyncRawFlowsClient
-        """
-        return self._raw_client
-
-    async def log(
-        self,
-        *,
-        version_id: typing.Optional[str] = None,
-        environment: typing.Optional[str] = None,
-        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
-        output_message: typing.Optional[ChatMessageParams] = OMIT,
-        run_id: typing.Optional[str] = OMIT,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        start_time: typing.Optional[dt.datetime] = OMIT,
-        end_time: typing.Optional[dt.datetime] = OMIT,
-        output: typing.Optional[str] = OMIT,
-        created_at: typing.Optional[dt.datetime] = OMIT,
-        error: typing.Optional[str] = OMIT,
-        provider_latency: typing.Optional[float] = OMIT,
-        stdout: typing.Optional[str] = OMIT,
-        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        source: typing.Optional[str] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        log_status: typing.Optional[LogStatus] = OMIT,
-        source_datapoint_id: typing.Optional[str] = OMIT,
-        trace_parent_id: typing.Optional[str] = OMIT,
-        user: typing.Optional[str] = OMIT,
-        flow_log_request_environment: typing.Optional[str] = OMIT,
-        save: typing.Optional[bool] = OMIT,
-        log_id: typing.Optional[str] = OMIT,
-        flow: typing.Optional[FlowKernelRequestParams] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> CreateFlowLogResponse:
-        """
-        Log to a Flow.
-
-        You can use query parameters `version_id`, or `environment`, to target
-        an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-        If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-        in order to trigger Evaluators.
-
-        Parameters
-        ----------
-        version_id : typing.Optional[str]
-            A specific Version ID of the Flow to log to.
-
-        environment : typing.Optional[str]
-            Name of the Environment identifying a deployed version to log to.
-
-        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
-            List of chat messages that were used as an input to the Flow.
-
-        output_message : typing.Optional[ChatMessageParams]
-            The output message returned by this Flow.
-
-        run_id : typing.Optional[str]
-            Unique identifier for the Run to associate the Log to.
-
-        path : typing.Optional[str]
-            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Flow.
-
-        start_time : typing.Optional[dt.datetime]
-            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
-
-        end_time : typing.Optional[dt.datetime]
-            The end time of the Trace. Will be updated if a child Log with a later end time is added.
-
-        output : typing.Optional[str]
-            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
-        created_at : typing.Optional[dt.datetime]
-            User-defined timestamp for when the log was created.
-
-        error : typing.Optional[str]
-            Error message if the log is an error.
-
-        provider_latency : typing.Optional[float]
-            Duration of the logged event in seconds.
-
-        stdout : typing.Optional[str]
-            Captured log and debug statements.
-
-        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw request sent to the provider.
-
-        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw response received from the provider.
-
-        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            The inputs passed to the Flow.
-
-        source : typing.Optional[str]
-            Identifies where the model was called from.
-
-        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Any additional metadata to record.
-
-        log_status : typing.Optional[LogStatus]
-            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
-
-        source_datapoint_id : typing.Optional[str]
-            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-        trace_parent_id : typing.Optional[str]
-            The ID of the parent Log to nest this Log under in a Trace.
-
-        user : typing.Optional[str]
-            End-user ID related to the Log.
-
-        flow_log_request_environment : typing.Optional[str]
-            The name of the Environment the Log is associated with.
-
-        save : typing.Optional[bool]
-            Whether the request/response payloads will be stored on Humanloop.
-
-        log_id : typing.Optional[str]
-            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-        flow : typing.Optional[FlowKernelRequestParams]
-            Flow used to generate the Trace.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        CreateFlowLogResponse
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import datetime
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            await client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8}
-        , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
-        }}, inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'
-        }, output='The patient is likely experiencing a myocardial infarction.
Immediate medical attention is required.', log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat("2024-07-08 21:40:39+00:00", ), ) - asyncio.run(main()) - """ - _response = await self._raw_client.log( - version_id=version_id, - environment=environment, - messages=messages, - output_message=output_message, - run_id=run_id, - path=path, - id=id, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - flow_log_request_environment=flow_log_request_environment, - save=save, - log_id=log_id, - flow=flow, - request_options=request_options, - ) - return _response.data - - async def update_log( - self, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowLogResponse: - """ - Update the status, inputs, output of a Flow Log. - - Marking a Flow Log as complete will trigger any monitoring Evaluators to run. - Inputs and output (or error) must be provided in order to mark it as complete. - - The end_time log attribute will be set to match the time the log is marked as complete. - - Parameters - ---------- - log_id : str - Unique identifier of the Flow Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Flow. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Flow. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Flow Log. - - output : typing.Optional[str] - The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.' - }, output='The patient is likely experiencing a myocardial infarction. 
Immediate medical attention is required.', log_status="complete", ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_log( - log_id, - messages=messages, - output_message=output_message, - inputs=inputs, - output=output, - error=error, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowResponse: - """ - Retrieve the Flow with the given ID. - - By default, the deployed version of the Flow is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : typing.Optional[str] - A specific Version ID of the Flow to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Flow with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowResponse: - """ - Move the Flow to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - path : typing.Optional[str] - Path of the Flow including the Flow name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Flow. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move Flow to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
-
-        Returns
-        -------
-        FlowResponse
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            await client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
-        asyncio.run(main())
-        """
-        _response = await self._raw_client.move(
-            id, path=path, name=name, directory_id=directory_id, request_options=request_options
-        )
-        return _response.data
-
-    async def list(
-        self,
-        *,
-        page: typing.Optional[int] = None,
-        size: typing.Optional[int] = None,
-        name: typing.Optional[str] = None,
-        user_filter: typing.Optional[str] = None,
-        sort_by: typing.Optional[FileSortBy] = None,
-        order: typing.Optional[SortOrder] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncPager[FlowResponse]:
-        """
-        Get a list of Flows.
-
-        Parameters
-        ----------
-        page : typing.Optional[int]
-            Page number for pagination.
-
-        size : typing.Optional[int]
-            Page size for pagination. Number of Flows to fetch.
-
-        name : typing.Optional[str]
-            Case-insensitive filter for Flow name.
-
-        user_filter : typing.Optional[str]
-            Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
-        sort_by : typing.Optional[FileSortBy]
-            Field to sort Flows by
-
-        order : typing.Optional[SortOrder]
-            Direction to sort by.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncPager[FlowResponse]
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            response = await client.flows.list(size=1, )
-            async for item in response:
-                print(item)
-
-            # alternatively, you can paginate page-by-page
-            async for page in response.iter_pages():
-                print(page)
-        asyncio.run(main())
-        """
-        return await self._raw_client.list(
-            page=page,
-            size=size,
-            name=name,
-            user_filter=user_filter,
-            sort_by=sort_by,
-            order=order,
-            request_options=request_options,
-        )
-
-    async def upsert(
-        self,
-        *,
-        attributes: typing.Dict[str, typing.Optional[typing.Any]],
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        version_name: typing.Optional[str] = OMIT,
-        version_description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> FlowResponse:
-        """
-        Create or update a Flow.
-
-        Flows can be identified by their `ID` or `path`.
-
-        You can provide `version_name` and `version_description` to identify and describe your versions.
-        Version names must be unique within a Flow - attempting to create a version with a name
-        that already exists will result in a 409 Conflict error.
-
-        Parameters
-        ----------
-        attributes : typing.Dict[str, typing.Optional[typing.Any]]
-            A key-value object identifying the Flow Version.
-
-        path : typing.Optional[str]
-            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Flow.
-
-        version_name : typing.Optional[str]
-            Unique name for the Flow version. Version names must be unique for a given Flow.
-
-        version_description : typing.Optional[str]
-            Description of the version, e.g., the changes made in this version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        FlowResponse
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            await client.flows.upsert(path='Personal Projects/MedQA Flow', attributes={'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8}
-        , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
-        }, version_name='medqa-flow-v1', version_description='Initial version', )
-        asyncio.run(main())
-        """
-        _response = await self._raw_client.upsert(
-            attributes=attributes,
-            path=path,
-            id=id,
-            version_name=version_name,
-            version_description=version_description,
-            request_options=request_options,
-        )
-        return _response.data
-
-    async def list_versions(
-        self,
-        id: str,
-        *,
-        evaluator_aggregates: typing.Optional[bool] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> ListFlows:
-        """
-        Get a list of all the versions of a Flow.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Flow.
-
-        evaluator_aggregates : typing.Optional[bool]
-            Whether to include Evaluator aggregate results for the versions in the response.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        ListFlows
-            Successful Response
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            await client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
-        asyncio.run(main())
-        """
-        _response = await self._raw_client.list_versions(
-            id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
-        )
-        return _response.data
-
-    async def delete_flow_version(
-        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
-    ) -> None:
-        """
-        Delete a version of the Flow.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Flow.
-
-        version_id : str
-            Unique identifier for the specific version of the Flow.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        from humanloop import AsyncHumanloop
-        import asyncio
-        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
-        async def main() -> None:
-            await client.flows.delete_flow_version(id='id', version_id='version_id', )
-        asyncio.run(main())
-        """
-        _response = await self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
-        return _response.data
-
-    async def update_flow_version(
-        self,
-        id: str,
-        version_id: str,
-        *,
-        name: typing.Optional[str] = OMIT,
-        description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> FlowResponse:
-        """
-        Update the name or description of the Flow version.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Flow.
-
-        version_id : str
-            Unique identifier for the specific version of the Flow.
-
-        name : typing.Optional[str]
-            Name of the version.
-
-        description : typing.Optional[str]
-            Description of the version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
- - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.update_flow_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_flow_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> FlowResponse: - """ - Deploy Flow to an Environment. - - Set the deployed version for the specified Environment. This Flow - will be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', ) - asyncio.run(main()) - """ - _response = await self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Flow from the Environment. - - Remove the deployed version for the specified Environment. This Flow - will no longer be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FlowResponse: - """ - Activate and deactivate Evaluators for monitoring the Flow. - - An activated Evaluator will automatically be run on all new "completed" Logs - within the Flow for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FlowResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data diff --git a/src/humanloop/flows/raw_client.py b/src/humanloop/flows/raw_client.py deleted file mode 100644 index e3954572..00000000 --- a/src/humanloop/flows/raw_client.py +++ /dev/null @@ -1,2217 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
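-# An illustrative sketch of toggling Monitoring Evaluators via the clients above;
-# ids are the placeholder values from the docstring examples, not real resources:
-#
-#     client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}])
-#     client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', deactivate=[{'evaluator_version_id': 'evv_1abc4308abd'}])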
- -import datetime as dt -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.flow_kernel_request import FlowKernelRequestParams -from ..types.create_flow_log_response import CreateFlowLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.flow_log_response import FlowLogResponse -from ..types.flow_response import FlowResponse -from ..types.http_validation_error import HttpValidationError -from ..types.list_flows import ListFlows -from ..types.log_status import LogStatus -from ..types.paginated_data_flow_response import PaginatedDataFlowResponse -from ..types.sort_order import SortOrder - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class RawFlowsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - flow_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - flow: typing.Optional[FlowKernelRequestParams] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[CreateFlowLogResponse]: - """ - Log to a Flow. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Flow. 
Otherwise, the default deployed version will be chosen. - - If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete` - in order to trigger Evaluators. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Flow to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Flow. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Flow. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Flow. - - start_time : typing.Optional[dt.datetime] - The start time of the Trace. Will be updated if a child Log with an earlier start time is added. - - end_time : typing.Optional[dt.datetime] - The end time of the Trace. Will be updated if a child Log with a later end time is added. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - flow_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- - flow : typing.Optional[FlowKernelRequestParams] - Flow used to generate the Trace. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[CreateFlowLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "flows/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "run_id": run_id, - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": flow_log_request_environment, - "save": save, - "log_id": log_id, - "flow": convert_and_respect_annotation_metadata( - object_=flow, annotation=FlowKernelRequestParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreateFlowLogResponse, - construct_type( - type_=CreateFlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_log( - self, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[FlowLogResponse]: - """ - Update the status, inputs, and output of a Flow Log. - - Marking a Flow Log as complete will trigger any monitoring Evaluators to run. - Inputs and output (or error) must be provided in order to mark it as complete. - - The `end_time` log attribute will be set to match the time the log is marked as complete. - - Parameters - ---------- - log_id : str - Unique identifier of the Flow Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Flow. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Flow. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Flow Log.
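Taken together, `log` and `update_log` implement the trace lifecycle these docstrings describe: open a Flow Log as `incomplete`, nest child Logs under it via `trace_parent_id`, then mark it `complete` to trigger monitoring Evaluators. A minimal sketch through the wrapped client (the path and inputs are placeholders, and passing `log_status` as a string literal assumes `LogStatus` accepts its literal values):

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Open a trace: an incomplete Flow Log can have child Logs added to it.
trace = client.flows.log(
    path="qa/chatbot",  # hypothetical Flow path
    inputs={"question": "What is a Flow?"},
    log_status="incomplete",
)
# Child Logs are nested under the trace with trace_parent_id=trace.id;
# the trace is then closed with update_log, sketched below.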
- - output : typing.Optional[str] - The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. - - error : typing.Optional[str] - The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[FlowLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/logs/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "inputs": inputs, - "output": output, - "error": error, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowLogResponse, - construct_type( - type_=FlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[FlowResponse]: - """ - Retrieve the Flow with the given ID. - - By default, the deployed version of the Flow is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : typing.Optional[str] - A specific Version ID of the Flow to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
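Closing the trace is then a single `update_log` call: moving the Log to `complete` is what makes it visible to monitoring Evaluators, and `end_time` is stamped at that moment. Continuing the sketch above (assuming the create response exposes the new Log's `id`):

client.flows.update_log(
    log_id=trace.id,
    output="A Flow groups related Logs into traces.",
    log_status="complete",  # triggers any activated monitoring Evaluators
)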
- - Returns - ------- - HttpResponse[FlowResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Flow with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[FlowResponse]: - """ - Move the Flow to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - path : typing.Optional[str] - Path of the Flow including the Flow name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Flow. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move Flow to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[FlowResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - "directory_id": directory_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[FlowResponse]: - """ - Get a list of Flows. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Flows to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Flow name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Flows by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
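Callers never touch the page counter that `list` manages: as the implementation below shows, the pager lazily requests `page + 1` via `get_next`, and `has_next` is set to `True` unconditionally, so iteration presumably ends once a page comes back empty. A sketch, reusing the `client` from the sketches above and assuming the wrapped `list` surfaces this `SyncPager` unchanged:

for flow in client.flows.list(size=50):
    print(flow.id, flow.path)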
- - Returns - ------- - SyncPager[FlowResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "flows", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataFlowResponse, - construct_type( - type_=PaginatedDataFlowResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upsert( - self, - *, - attributes: typing.Dict[str, typing.Optional[typing.Any]], - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[FlowResponse]: - """ - Create or update a Flow. - - Flows can also be identified by the `ID` or their `path`. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Flow - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - attributes : typing.Dict[str, typing.Optional[typing.Any]] - A key-value object identifying the Flow Version. - - path : typing.Optional[str] - Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Flow. - - version_name : typing.Optional[str] - Unique name for the Flow version. Version names must be unique for a given Flow. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
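`upsert` appears to key the version on `attributes`: a matching Flow and attributes resolve to the existing version, a new combination creates a new version, and reusing a `version_name` for different attributes is what produces the 409 Conflict described above. A sketch with placeholder values:

flow = client.flows.upsert(
    path="qa/chatbot",
    attributes={"agent": "retrieval-v2"},  # placeholder; identifies the Flow Version
    version_name="retrieval-v2",
)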
- - Returns - ------- - HttpResponse[FlowResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "flows", - method="POST", - json={ - "path": path, - "id": id, - "attributes": attributes, - "version_name": version_name, - "version_description": version_description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ListFlows]: - """ - Get a list of all the versions of a Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ListFlows] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListFlows, - construct_type( - type_=ListFlows, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_flow_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Delete a version of the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : str - Unique identifier for the specific version of the Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
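Version housekeeping pairs the two endpoints documented here: `list_versions` (optionally with Evaluator aggregates) to inspect what exists, then `delete_flow_version` to prune. A sketch with placeholder IDs:

client.flows.list_versions(
    id="fl_6o701g4jmcanPVHxdqD0O",
    evaluator_aggregates=True,
)
client.flows.delete_flow_version(
    id="fl_6o701g4jmcanPVHxdqD0O",
    version_id="flv_6o701g4jmcanPVHxdqD0O",  # placeholder version ID
)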
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_flow_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[FlowResponse]: - """ - Update the name or description of the Flow version. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : str - Unique identifier for the specific version of the Flow. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[FlowResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[FlowResponse]: - """ - Deploy Flow to an Environment. - - Set the deployed version for the specified Environment. This Flow - will be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[FlowResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove deployed Flow from the Environment. - - Remove the deployed version for the specified Environment. This Flow - will no longer be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - HttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[FlowResponse]: - """ - Activate and deactivate Evaluators for monitoring the Flow. - - An activated Evaluator will automatically be run on all new "completed" Logs - within the Flow for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
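The `activate` and `deactivate` sequences map directly onto the JSON posted to `flows/{id}/evaluators` in the implementation below, each item referencing an Evaluator version. A sketch mirroring the wrapped-client Example shown earlier (the deactivated ID is a placeholder, and the deactivate items are assumed to take the same shape as the activate items):

client.flows.update_monitoring(
    id="fl_6o701g4jmcanPVHxdqD0O",
    activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
    deactivate=[{"evaluator_version_id": "evv_9xyz1234abcd"}],  # placeholder ID
)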
- - Returns - ------- - HttpResponse[FlowResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawFlowsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - flow_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - flow: typing.Optional[FlowKernelRequestParams] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[CreateFlowLogResponse]: - """ - Log to a Flow. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Flow. Otherwise, the default deployed version will be chosen. - - If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete` - in order to trigger Evaluators. 
- - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Flow to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Flow. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Flow. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Flow. - - start_time : typing.Optional[dt.datetime] - The start time of the Trace. Will be updated if a child Log with an earlier start time is added. - - end_time : typing.Optional[dt.datetime] - The end time of the Trace. Will be updated if a child Log with a later end time is added. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - flow_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - flow : typing.Optional[FlowKernelRequestParams] - Flow used to generate the Trace. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncHttpResponse[CreateFlowLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "flows/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "run_id": run_id, - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": flow_log_request_environment, - "save": save, - "log_id": log_id, - "flow": convert_and_respect_annotation_metadata( - object_=flow, annotation=FlowKernelRequestParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreateFlowLogResponse, - construct_type( - type_=CreateFlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_log( - self, - log_id: str, - *, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - output: typing.Optional[str] = OMIT, - error: typing.Optional[str] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[FlowLogResponse]: - """ - Update the status, inputs, and output of a Flow Log. - - Marking a Flow Log as complete will trigger any monitoring Evaluators to run. - Inputs and output (or error) must be provided in order to mark it as complete. - - The `end_time` log attribute will be set to match the time the log is marked as complete. - - Parameters - ---------- - log_id : str - Unique identifier of the Flow Log. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - List of chat messages that were used as an input to the Flow. - - output_message : typing.Optional[ChatMessageParams] - The output message returned by this Flow. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Flow Log. - - output : typing.Optional[str] - The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
- - error : typing.Optional[str] - The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. - - log_status : typing.Optional[LogStatus] - Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[FlowLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/logs/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "inputs": inputs, - "output": output, - "error": error, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowLogResponse, - construct_type( - type_=FlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[FlowResponse]: - """ - Retrieve the Flow with the given ID. - - By default, the deployed version of the Flow is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : typing.Optional[str] - A specific Version ID of the Flow to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[FlowResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Flow with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - directory_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[FlowResponse]: - """ - Move the Flow to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - path : typing.Optional[str] - Path of the Flow including the Flow name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Flow. - - directory_id : typing.Optional[str] - Unique identifier for the Directory to move Flow to. Starts with `dir_`. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[FlowResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - "directory_id": directory_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[FlowResponse]: - """ - Get a list of Flows. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Flows to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Flow name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Flows by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncPager[FlowResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "flows", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataFlowResponse, - construct_type( - type_=PaginatedDataFlowResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def upsert( - self, - *, - attributes: typing.Dict[str, typing.Optional[typing.Any]], - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[FlowResponse]: - """ - Create or update a Flow. - - Flows can also be identified by the `ID` or their `path`. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Flow - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - attributes : typing.Dict[str, typing.Optional[typing.Any]] - A key-value object identifying the Flow Version. - - path : typing.Optional[str] - Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Flow. - - version_name : typing.Optional[str] - Unique name for the Flow version. Version names must be unique for a given Flow. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
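The async pager mirrors the synchronous one except that `_get_next` is a coroutine, so consumption is an `async for`. A sketch, assuming `AsyncPager` supports async iteration as its use above implies:

import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    pager = await client.flows.list(size=50)
    async for flow in pager:
        print(flow.id)

asyncio.run(main())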
- - Returns - ------- - AsyncHttpResponse[FlowResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "flows", - method="POST", - json={ - "path": path, - "id": id, - "attributes": attributes, - "version_name": version_name, - "version_description": version_description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ListFlows]: - """ - Get a list of all the versions of a Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[ListFlows] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListFlows, - construct_type( - type_=ListFlows, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_flow_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete a version of the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : str - Unique identifier for the specific version of the Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_flow_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[FlowResponse]: - """ - Update the name or description of the Flow version. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - version_id : str - Unique identifier for the specific version of the Flow. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[FlowResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[FlowResponse]: - """ - Deploy Flow to an Environment. - - Set the deployed version for the specified Environment. This Flow - will be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[FlowResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove deployed Flow from the Environment. - - Remove the deployed version for the specified Environment. This Flow - will no longer be used for calls made to the Flow in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Flow. - - Parameters - ---------- - id : str - Unique identifier for Flow. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[FlowResponse]: - """ - Activate and deactivate Evaluators for monitoring the Flow. - - An activated Evaluator will automatically be run on all new "completed" Logs - within the Flow for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[FlowResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/logs/__init__.py b/src/humanloop/logs/__init__.py deleted file mode 100644 index 5cde0202..00000000 --- a/src/humanloop/logs/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py deleted file mode 100644 index 278c97cf..00000000 --- a/src/humanloop/logs/client.py +++ /dev/null @@ -1,360 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..types.log_response import LogResponse -from .raw_client import AsyncRawLogsClient, RawLogsClient - - -class LogsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawLogsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawLogsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - RawLogsClient - """ - return self._raw_client - - def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - version_id: typing.Optional[str] = None, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - search: typing.Optional[str] = None, - metadata_search: typing.Optional[str] = None, - start_date: typing.Optional[dt.datetime] = None, - end_date: typing.Optional[dt.datetime] = None, - include_parent: typing.Optional[bool] = None, - in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, - sample: typing.Optional[int] = None, - include_trace_children: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[LogResponse]: - """ - List all Logs for the given filter criteria. 
- - Parameters - ---------- - file_id : str - Unique identifier for the File to list Logs for. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - version_id : typing.Optional[str] - If provided, only Logs belonging to the specified Version will be returned. - - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - If provided, returns Logs whose IDs contain any of the specified values as substrings. - - search : typing.Optional[str] - If provided, only Logs that contain the provided string in their inputs and output will be returned. - - metadata_search : typing.Optional[str] - If provided, only Logs that contain the provided string in their metadata will be returned. - - start_date : typing.Optional[dt.datetime] - If provided, only Logs created after the specified date will be returned. - - end_date : typing.Optional[dt.datetime] - If provided, only Logs created before the specified date will be returned. - - include_parent : typing.Optional[bool] - If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs. - - in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]] - If true, return only Logs that are associated with a Trace; if false, return only Logs that are not associated with a Trace. - - sample : typing.Optional[int] - If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.) - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SyncPager[LogResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.logs.list(file_id='file_123abc', size=1, ) - for item in response: - print(item) - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - print(page) - """ - return self._raw_client.list( - file_id=file_id, - page=page, - size=size, - version_id=version_id, - id=id, - search=search, - metadata_search=metadata_search, - start_date=start_date, - end_date=end_date, - include_parent=include_parent, - in_trace_filter=in_trace_filter, - sample=sample, - include_trace_children=include_trace_children, - request_options=request_options, - ) - - def delete( - self, - *, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> None: - """ - Delete Logs with the given IDs. - - Parameters - ---------- - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Unique identifiers for the Logs to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', ) - """ - _response = self._raw_client.delete(id=id, request_options=request_options) - return _response.data - - def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse: - """ - Retrieve the Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Log.
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', ) - """ - _response = self._raw_client.get(id, request_options=request_options) - return _response.data - - class AsyncLogsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawLogsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawLogsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawLogsClient - """ - return self._raw_client - - async def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - version_id: typing.Optional[str] = None, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - search: typing.Optional[str] = None, - metadata_search: typing.Optional[str] = None, - start_date: typing.Optional[dt.datetime] = None, - end_date: typing.Optional[dt.datetime] = None, - include_parent: typing.Optional[bool] = None, - in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, - sample: typing.Optional[int] = None, - include_trace_children: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[LogResponse]: - """ - List all Logs for the given filter criteria. - - Parameters - ---------- - file_id : str - Unique identifier for the File to list Logs for. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - version_id : typing.Optional[str] - If provided, only Logs belonging to the specified Version will be returned. - - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - If provided, returns Logs whose IDs contain any of the specified values as substrings. - - search : typing.Optional[str] - If provided, only Logs that contain the provided string in their inputs and output will be returned. - - metadata_search : typing.Optional[str] - If provided, only Logs that contain the provided string in their metadata will be returned. - - start_date : typing.Optional[dt.datetime] - If provided, only Logs created after the specified date will be returned. - - end_date : typing.Optional[dt.datetime] - If provided, only Logs created before the specified date will be returned. - - include_parent : typing.Optional[bool] - If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs. - - in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]] - If true, return only Logs that are associated with a Trace; if false, return only Logs that are not associated with a Trace. - - sample : typing.Optional[int] - If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.) - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncPager[LogResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.logs.list(file_id='file_123abc', size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list( - file_id=file_id, - page=page, - size=size, - version_id=version_id, - id=id, - search=search, - metadata_search=metadata_search, - start_date=start_date, - end_date=end_date, - include_parent=include_parent, - in_trace_filter=in_trace_filter, - sample=sample, - include_trace_children=include_trace_children, - request_options=request_options, - ) - - async def delete( - self, - *, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> None: - """ - Delete Logs with the given IDs. - - Parameters - ---------- - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Unique identifiers for the Logs to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id=id, request_options=request_options) - return _response.data - - async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse: - """ - Retrieve the Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Log. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get(id, request_options=request_options) - return _response.data diff --git a/src/humanloop/logs/raw_client.py b/src/humanloop/logs/raw_client.py deleted file mode 100644 index e155be92..00000000 --- a/src/humanloop/logs/raw_client.py +++ /dev/null @@ -1,501 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
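The thin `LogsClient`/`AsyncLogsClient` above delegate to the raw clients in `raw_client.py` (next hunk), which raise `UnprocessableEntityError` for 422 responses and `ApiError` for any other non-2xx status. A minimal usage sketch of that error contract, assuming the deleted module layout shown in this patch (the `file_id` value is illustrative, and the exception attributes are inferred from the constructor calls visible in the deleted code):

    from humanloop import Humanloop
    from humanloop.core.api_error import ApiError
    from humanloop.errors.unprocessable_entity_error import UnprocessableEntityError

    client = Humanloop(api_key="YOUR_API_KEY")
    try:
        for log in client.logs.list(file_id="file_123abc", size=50):
            print(log.id)  # assumes LogResponse carries an `id`, as the example IDs suggest
    except UnprocessableEntityError as err:
        print("validation error:", err.body)  # body holds the parsed HttpValidationError
    except ApiError as err:
        print("request failed:", err.status_code, err.body)  # attributes inferred from ApiError(...)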
- -import datetime as dt -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.datetime_utils import serialize_datetime -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..types.log_response import LogResponse -from ..types.paginated_data_log_response import PaginatedDataLogResponse - - -class RawLogsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - version_id: typing.Optional[str] = None, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - search: typing.Optional[str] = None, - metadata_search: typing.Optional[str] = None, - start_date: typing.Optional[dt.datetime] = None, - end_date: typing.Optional[dt.datetime] = None, - include_parent: typing.Optional[bool] = None, - in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, - sample: typing.Optional[int] = None, - include_trace_children: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[LogResponse]: - """ - List all Logs for the given filter criteria. - - Parameters - ---------- - file_id : str - Unique identifier for the File to list Logs for. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - version_id : typing.Optional[str] - If provided, only Logs belonging to the specified Version will be returned. - - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - If provided, returns Logs whose IDs contain any of the specified values as substrings. - - search : typing.Optional[str] - If provided, only Logs that contain the provided string in their inputs and output will be returned. - - metadata_search : typing.Optional[str] - If provided, only Logs that contain the provided string in their metadata will be returned. - - start_date : typing.Optional[dt.datetime] - If provided, only Logs created after the specified date will be returned. - - end_date : typing.Optional[dt.datetime] - If provided, only Logs created before the specified date will be returned. - - include_parent : typing.Optional[bool] - If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs. - - in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]] - If true, return only Logs that are associated with a Trace; if false, return only Logs that are not associated with a Trace. - - sample : typing.Optional[int] - If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.) - - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - SyncPager[LogResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "logs", - method="GET", - params={ - "file_id": file_id, - "page": page, - "size": size, - "version_id": version_id, - "id": id, - "search": search, - "metadata_search": metadata_search, - "start_date": serialize_datetime(start_date) if start_date is not None else None, - "end_date": serialize_datetime(end_date) if end_date is not None else None, - "include_parent": include_parent, - "in_trace_filter": in_trace_filter, - "sample": sample, - "include_trace_children": include_trace_children, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataLogResponse, - construct_type( - type_=PaginatedDataLogResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - file_id=file_id, - page=page + 1, - size=size, - version_id=version_id, - id=id, - search=search, - metadata_search=metadata_search, - start_date=start_date, - end_date=end_date, - include_parent=include_parent, - in_trace_filter=in_trace_filter, - sample=sample, - include_trace_children=include_trace_children, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete( - self, - *, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[None]: - """ - Delete Logs with the given IDs. - - Parameters - ---------- - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Unique identifiers for the Logs to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - "logs", - method="DELETE", - params={ - "id": id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[LogResponse]: - """ - Retrieve the Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Log. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[LogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"logs/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawLogsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def list( - self, - *, - file_id: str, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - version_id: typing.Optional[str] = None, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - search: typing.Optional[str] = None, - metadata_search: typing.Optional[str] = None, - start_date: typing.Optional[dt.datetime] = None, - end_date: typing.Optional[dt.datetime] = None, - include_parent: typing.Optional[bool] = None, - in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, - sample: typing.Optional[int] = None, - include_trace_children: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[LogResponse]: - """ - List all Logs for the given filter criteria. - - Parameters - ---------- - file_id : str - Unique identifier for the File to list Logs for. - - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Logs to fetch. - - version_id : typing.Optional[str] - If provided, only Logs belonging to the specified Version will be returned. - - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - If provided, returns Logs whose IDs contain any of the specified values as substrings. - - search : typing.Optional[str] - If provided, only Logs that contain the provided string in their inputs and output will be returned. - - metadata_search : typing.Optional[str] - If provided, only Logs that contain the provided string in their metadata will be returned. - - start_date : typing.Optional[dt.datetime] - If provided, only Logs created after the specified date will be returned. - - end_date : typing.Optional[dt.datetime] - If provided, only Logs created before the specified date will be returned. - - include_parent : typing.Optional[bool] - If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs. - - in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]] - If true, return only Logs that are associated with a Trace; if false, return only Logs that are not associated with a Trace. - - sample : typing.Optional[int] - If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
- - include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncPager[LogResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "logs", - method="GET", - params={ - "file_id": file_id, - "page": page, - "size": size, - "version_id": version_id, - "id": id, - "search": search, - "metadata_search": metadata_search, - "start_date": serialize_datetime(start_date) if start_date is not None else None, - "end_date": serialize_datetime(end_date) if end_date is not None else None, - "include_parent": include_parent, - "in_trace_filter": in_trace_filter, - "sample": sample, - "include_trace_children": include_trace_children, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataLogResponse, - construct_type( - type_=PaginatedDataLogResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - file_id=file_id, - page=page + 1, - size=size, - version_id=version_id, - id=id, - search=search, - metadata_search=metadata_search, - start_date=start_date, - end_date=end_date, - include_parent=include_parent, - in_trace_filter=in_trace_filter, - sample=sample, - include_trace_children=include_trace_children, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, - *, - id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[None]: - """ - Delete Logs with the given IDs. - - Parameters - ---------- - id : typing.Optional[typing.Union[str, typing.Sequence[str]]] - Unique identifiers for the Logs to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - "logs", - method="DELETE", - params={ - "id": id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[LogResponse]: - """ - Retrieve the Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Log. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[LogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"logs/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py deleted file mode 100644 index dcff7e62..00000000 --- a/src/humanloop/prompts/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
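Every method on `RawLogsClient`/`AsyncRawLogsClient` wraps its result in `HttpResponse`/`AsyncHttpResponse`, which is what the `with_raw_response` property on the thin clients hands back. A short sketch of reading parsed data alongside response metadata; the `.data` attribute is confirmed by the delegating code above, while exposing the underlying httpx response via a `response` attribute is an assumption about the deleted `core/http_response.py`:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    wrapped = client.logs.with_raw_response.get("prv_Wu6zx1lAWJRqOyL8nWuZk")
    log = wrapped.data  # parsed LogResponse (same object the thin client returns)
    raw = wrapped.response  # assumed accessor for the underlying httpx.Response
    print(raw.status_code, raw.headers.get("content-type"), log.id)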
- -# isort: skip_file - -from .types import ( - PromptLogRequestPrompt, - PromptLogRequestToolChoice, - PromptLogUpdateRequestToolChoice, - PromptRequestReasoningEffort, - PromptRequestStop, - PromptRequestTemplate, - PromptsCallRequestPrompt, - PromptsCallRequestToolChoice, - PromptsCallStreamRequestPrompt, - PromptsCallStreamRequestToolChoice, -) -from .requests import ( - PromptLogRequestPromptParams, - PromptLogRequestToolChoiceParams, - PromptLogUpdateRequestToolChoiceParams, - PromptRequestReasoningEffortParams, - PromptRequestStopParams, - PromptRequestTemplateParams, - PromptsCallRequestPromptParams, - PromptsCallRequestToolChoiceParams, - PromptsCallStreamRequestPromptParams, - PromptsCallStreamRequestToolChoiceParams, -) - -__all__ = [ - "PromptLogRequestPrompt", - "PromptLogRequestPromptParams", - "PromptLogRequestToolChoice", - "PromptLogRequestToolChoiceParams", - "PromptLogUpdateRequestToolChoice", - "PromptLogUpdateRequestToolChoiceParams", - "PromptRequestReasoningEffort", - "PromptRequestReasoningEffortParams", - "PromptRequestStop", - "PromptRequestStopParams", - "PromptRequestTemplate", - "PromptRequestTemplateParams", - "PromptsCallRequestPrompt", - "PromptsCallRequestPromptParams", - "PromptsCallRequestToolChoice", - "PromptsCallRequestToolChoiceParams", - "PromptsCallStreamRequestPrompt", - "PromptsCallStreamRequestPromptParams", - "PromptsCallStreamRequestToolChoice", - "PromptsCallStreamRequestToolChoiceParams", -] diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py deleted file mode 100644 index cd772a17..00000000 --- a/src/humanloop/prompts/client.py +++ /dev/null @@ -1,2990 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.provider_api_keys import ProviderApiKeysParams -from ..requests.response_format import ResponseFormatParams -from ..requests.tool_function import ToolFunctionParams -from ..types.create_prompt_log_response import CreatePromptLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.list_prompts import ListPrompts -from ..types.log_response import LogResponse -from ..types.log_status import LogStatus -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.populate_template_response import PopulateTemplateResponse -from ..types.prompt_call_response import PromptCallResponse -from ..types.prompt_call_stream_response import PromptCallStreamResponse -from ..types.prompt_kernel_request import PromptKernelRequest -from ..types.prompt_response import PromptResponse -from ..types.sort_order import SortOrder -from ..types.template_language import TemplateLanguage -from .raw_client import AsyncRawPromptsClient, RawPromptsClient -from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams -from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams 
-from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams -from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams -from .requests.prompt_request_stop import PromptRequestStopParams -from .requests.prompt_request_template import PromptRequestTemplateParams -from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams -from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams -from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams -from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class PromptsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawPromptsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawPromptsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - RawPromptsClient - """ - return self._raw_client - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompt_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreatePromptLogResponse: - """ - Log to a Prompt. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Prompt. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. 
This is helpful - in the case where you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function. - - prompt : typing.Optional[PromptLogRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template.
- - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompt_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreatePromptLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - import datetime - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump' - }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. 
Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', ) - """ - _response = self._raw_client.log( - version_id=version_id, - environment=environment, - run_id=run_id, - path=path, - id=id, - output_message=output_message, - prompt_tokens=prompt_tokens, - reasoning_tokens=reasoning_tokens, - output_tokens=output_tokens, - prompt_cost=prompt_cost, - output_cost=output_cost, - finish_reason=finish_reason, - messages=messages, - tool_choice=tool_choice, - prompt=prompt, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - prompt_log_request_environment=prompt_log_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - def update_log( - self, - id: str, - log_id: str, - *, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> LogResponse: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - log_id : str - Unique identifier for the Log. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint.
- - tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - LogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.update_log(id='id', log_id='log_id', ) - """ - _response = self._raw_client.update_log( - id, - log_id, - output_message=output_message, - prompt_tokens=prompt_tokens, - reasoning_tokens=reasoning_tokens, - output_tokens=output_tokens, - prompt_cost=prompt_cost, - output_cost=output_cost, - finish_reason=finish_reason, - messages=messages, - tool_choice=tool_choice, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[PromptCallStreamResponse]: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Prompt. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt.
- - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallStreamRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response. - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text.
Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.Iterator[PromptCallStreamResponse] - - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.prompts.call_stream() - for chunk in response: - print(chunk) - """ - with self._raw_client.call_stream( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - prompt=prompt, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - prompts_call_stream_request_environment=prompts_call_stream_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - num_samples=num_samples, - return_inputs=return_inputs, - logprobs=logprobs, - suffix=suffix, - request_options=request_options, - ) as r: - yield from r.data - - def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptCallResponse: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use query parameters `version_id` or `environment` to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - when you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response. - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptCallResponse - - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object', 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}, 'required': []}}]}, messages=[{'role': "user", 'content': 'latest apple'}], ) - """ - _response = self._raw_client.call( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - prompt=prompt, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - prompts_call_request_environment=prompts_call_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - num_samples=num_samples, - return_inputs=return_inputs, - logprobs=logprobs, - suffix=suffix, - request_options=request_options, - ) - return _response.data - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[PromptResponse]: - """ - Get a list of all Prompts. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Prompts to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Prompt name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Prompts by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
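# Building on the stockbot example in `call` above: a hedged sketch of the dict
# form of `tool_choice`, which forces the model to call the named function. The
# prompt config is trimmed for brevity; everything else reuses arguments
# already shown in the generated example.
response = client.prompts.call(
    path="persona",
    prompt={
        "model": "gpt-4",
        "template": [{"role": "system", "content": "You are stockbot. Return latest prices."}],
        "tools": [{"name": "get_stock_price", "description": "Get current stock price",
                   "parameters": {"type": "object", "properties": {}, "required": []}}],
    },
    messages=[{"role": "user", "content": "latest apple"}],
    tool_choice={"type": "function", "function": {"name": "get_stock_price"}},
)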
- - Returns - ------- - SyncPager[PromptResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.prompts.list(size=1, ) - for item in response: - print(item) - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - print(page) - """ - return self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[PromptRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[PromptRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, - linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Create a Prompt or update it with a new version if it already exists. - - Prompts are identified by their `id` or `path`. The parameters (i.e. the prompt template, temperature, model, etc.) determine the versions of the Prompt. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Prompt - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[PromptRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
- - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[PromptRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams] - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[ToolFunctionParams]] - The tool specification that the model can choose to call if Tool calling is supported. - - linked_tools : typing.Optional[typing.Sequence[str]] - The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Prompt version. Version names must be unique for a given Prompt. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - description : typing.Optional[str] - Description of the Prompt. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this prompt. - - readme : typing.Optional[str] - Long description of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
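# The upsert docs note that reusing a `version_name` results in a 409 Conflict.
# A sketch of handling that, assuming the SDK raises its generic ApiError with
# a `status_code` attribute (both the exception class and the attribute are
# assumptions about the error surface):
from humanloop.core.api_error import ApiError

try:
    client.prompts.upsert(model="gpt-4o", path="Coding Assistant", version_name="v1")
except ApiError as e:
    if e.status_code == 409:
        print("Version name already taken; choose a new version_name.")
    else:
        raise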
- - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', ) - """ - _response = self._raw_client.upsert( - model=model, - path=path, - id=id, - endpoint=endpoint, - template=template, - template_language=template_language, - provider=provider, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - other=other, - seed=seed, - response_format=response_format, - reasoning_effort=reasoning_effort, - tools=tools, - linked_tools=linked_tools, - attributes=attributes, - version_name=version_name, - version_description=version_description, - description=description, - tags=tags, - readme=readme, - request_options=request_options, - ) - return _response.data - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Retrieve the Prompt with the given ID. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', ) - """ - _response = self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Prompt with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Move the Prompt to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - path : typing.Optional[str] - Path of the Prompt including the Prompt name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', ) - """ - _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - def populate( - self, - id: str, - *, - request: typing.Dict[str, typing.Optional[typing.Any]], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PopulateTemplateResponse: - """ - Retrieve the Prompt with the given ID, including the populated template. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request : typing.Dict[str, typing.Optional[typing.Any]] - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve to populate the template. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from to populate the template. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PopulateTemplateResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.populate(id='id', request={'key': 'value'}, ) - """ - _response = self._raw_client.populate( - id, request=request, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListPrompts: - """ - Get a list of all the versions of a Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListPrompts - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', ) - """ - _response = self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - def delete_prompt_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.delete_prompt_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.delete_prompt_version(id, version_id, request_options=request_options) - return _response.data - - def patch_prompt_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Update the name or description of the Prompt version. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.patch_prompt_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.patch_prompt_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> PromptResponse: - """ - Deploy Prompt to an Environment. - - Set the deployed version for the specified Environment. This version of the Prompt - will be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) - """ - _response = self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Prompt from the Environment. - - Remove the deployed version for the specified Environment. This version of the Prompt - will no longer be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
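# A short sketch of the deployment lifecycle covered by `set_deployment` and
# `remove_deployment` above; all IDs are illustrative placeholders.
prompt_id = "pr_30gco7dx6JDq4200GVOHa"  # illustrative
client.prompts.set_deployment(
    id=prompt_id,
    environment_id="env_...",  # illustrative
    version_id="prv_...",      # illustrative
)
# ...later, stop serving that version in this Environment:
client.prompts.remove_deployment(id=prompt_id, environment_id="env_...")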
- - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.remove_deployment(id='id', environment_id='environment_id', ) - """ - _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', ) - """ - _response = self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Activate and deactivate Evaluators for monitoring the Prompt. - - An activated Evaluator will automatically be run on all new Logs - within the Prompt for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) - """ - _response = self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> str: - """ - Serialize a Prompt to the .prompt file format. - - Useful for storing the Prompt with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
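# A sketch of the version-control workflow that `serialize` above describes,
# paired with `deserialize` below: dump the targeted version to a .prompt file,
# then recover the version-defining attributes from it. The file name is
# illustrative; only methods shown in this client are used.
from pathlib import Path

raw = client.prompts.serialize(id="pr_30gco7dx6JDq4200GVOHa")
Path("persona.prompt").write_text(raw)

# Round-trip: parse the .prompt contents back into a PromptKernelRequest.
kernel = client.prompts.deserialize(prompt=Path("persona.prompt").read_text())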
- - Returns - ------- - str - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.serialize(id='id', ) - """ - _response = self._raw_client.serialize( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def deserialize( - self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None - ) -> PromptKernelRequest: - """ - Deserialize a Prompt from the .prompt file format. - - This returns a subset of the attributes required by a Prompt. - This subset defines the Prompt version (e.g. `model`, `temperature`, etc.). - - Parameters - ---------- - prompt : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptKernelRequest - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.prompts.deserialize(prompt='prompt', ) - """ - _response = self._raw_client.deserialize(prompt=prompt, request_options=request_options) - return _response.data - - - class AsyncPromptsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawPromptsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawPromptsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawPromptsClient - """ - return self._raw_client - - async def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompt_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreatePromptLogResponse: - """ - Log to a Prompt.
- - You can use query parameters `version_id` or `environment` to target - an existing version of the Prompt. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - when you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log with. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptLogRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements.
- - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompt_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreatePromptLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import datetime - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'}, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big.
Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', ) - asyncio.run(main()) - """ - _response = await self._raw_client.log( - version_id=version_id, - environment=environment, - run_id=run_id, - path=path, - id=id, - output_message=output_message, - prompt_tokens=prompt_tokens, - reasoning_tokens=reasoning_tokens, - output_tokens=output_tokens, - prompt_cost=prompt_cost, - output_cost=output_cost, - finish_reason=finish_reason, - messages=messages, - tool_choice=tool_choice, - prompt=prompt, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - prompt_log_request_environment=prompt_log_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - async def update_log( - self, - id: str, - log_id: str, - *, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> LogResponse: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - log_id : str - Unique identifier for the Log. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint.
- - tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - LogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.update_log(id='id', log_id='log_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_log( - id, - log_id, - output_message=output_message, - prompt_tokens=prompt_tokens, - reasoning_tokens=reasoning_tokens, - output_tokens=output_tokens, - prompt_cost=prompt_cost, - output_cost=output_cost, - finish_reason=finish_reason, - messages=messages, - tool_choice=tool_choice, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - async def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[PromptCallStreamResponse]: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use query parameters `version_id` or `environment` to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - when you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallStreamRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response. - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.AsyncIterator[PromptCallStreamResponse] - - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.prompts.call_stream() - async for chunk in response: - print(chunk) - asyncio.run(main()) - """ - async with self._raw_client.call_stream( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - prompt=prompt, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - prompts_call_stream_request_environment=prompts_call_stream_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - num_samples=num_samples, - return_inputs=return_inputs, - logprobs=logprobs, - suffix=suffix, - request_options=request_options, - ) as r: - async for data in r.data: - yield data - - async def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptCallResponse: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use query parameters `version_id` or `environment` to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - when you are storing or deriving your Prompt details in code.
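# A hedged sketch of collecting a full completion from the async stream shown
# above; `chunk.output` is an assumption about the fields exposed by
# PromptCallStreamResponse.
import asyncio
from humanloop import AsyncHumanloop

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    parts = []
    response = await client.prompts.call_stream(
        path="persona", messages=[{"role": "user", "content": "hi"}]
    )
    async for chunk in response:
        parts.append(chunk.output or "")  # assumed field name
    print("".join(parts))

asyncio.run(main())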
- - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptCallResponse - - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object' - , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}} - , 'required': [] - }}]}, messages=[{'role': "user", 'content': 'latest apple'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.call( - version_id=version_id, - environment=environment, - path=path, - id=id, - messages=messages, - tool_choice=tool_choice, - prompt=prompt, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - prompts_call_request_environment=prompts_call_request_environment, - save=save, - log_id=log_id, - provider_api_keys=provider_api_keys, - num_samples=num_samples, - return_inputs=return_inputs, - logprobs=logprobs, - suffix=suffix, - request_options=request_options, - ) - return _response.data - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[PromptResponse]: - """ - Get a list of all Prompts. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Prompts to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Prompt name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Prompts by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
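A sketch combining these list filters; the sort field and order literals, and the id attribute on the returned Prompts, are assumptions rather than values taken from this file:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Fetch Prompts whose name matches "assistant"; the AsyncPager
    # transparently fetches further pages as you iterate.
    response = await client.prompts.list(
        size=50,
        name="assistant",
        sort_by="created_at",  # assumption: a valid FileSortBy value
        order="desc",          # assumption: a valid SortOrder value
    )
    async for prompt in response:
        print(prompt.id)  # assumption: PromptResponse exposes an id field


asyncio.run(main())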
- - Returns - ------- - AsyncPager[PromptResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.prompts.list(size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - async def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[PromptRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[PromptRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, - linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Create a Prompt or update it with a new version if it already exists. - - Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Prompt - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models). - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[PromptRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string.
- - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[PromptRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams] - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[ToolFunctionParams]] - The tool specification that the model can choose to call if Tool calling is supported. - - linked_tools : typing.Optional[typing.Sequence[str]] - The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Prompt version. Version names must be unique for a given Prompt. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - description : typing.Optional[str] - Description of the Prompt. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this Prompt. - - readme : typing.Optional[str] - Long description of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
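Since reasoning_effort is provider-dependent as described above, the two accepted shapes look roughly as follows; the model names and the "medium" effort value are assumptions, not taken from this SDK:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # OpenAI reasoning models take an effort level (value assumed).
    await client.prompts.upsert(path="demo/reasoner", model="o3-mini", reasoning_effort="medium")
    # Anthropic reasoning models take an integer maximum token budget.
    await client.prompts.upsert(path="demo/reasoner", model="claude-3-7-sonnet-20250219", reasoning_effort=1024)


asyncio.run(main())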
- - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', ) - asyncio.run(main()) - """ - _response = await self._raw_client.upsert( - model=model, - path=path, - id=id, - endpoint=endpoint, - template=template, - template_language=template_language, - provider=provider, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - other=other, - seed=seed, - response_format=response_format, - reasoning_effort=reasoning_effort, - tools=tools, - linked_tools=linked_tools, - attributes=attributes, - version_name=version_name, - version_description=version_description, - description=description, - tags=tags, - readme=readme, - request_options=request_options, - ) - return _response.data - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Retrieve the Prompt with the given ID. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Prompt with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Move the Prompt to a different path or change the name. 
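A short sketch of move with placeholder IDs; per the parameters documented below, path relocates the Prompt in the Humanloop filesystem, while name renames it in place:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Relocate and rename in one call by giving the full new path...
    await client.prompts.move(id="pr_123", path="archive/assistant-v1")
    # ...or rename in place by giving only the new name.
    await client.prompts.move(id="pr_123", name="assistant-v2")


asyncio.run(main())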
- - Parameters - ---------- - id : str - Unique identifier for Prompt. - - path : typing.Optional[str] - Path of the Prompt including the Prompt name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', ) - asyncio.run(main()) - """ - _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - async def populate( - self, - id: str, - *, - request: typing.Dict[str, typing.Optional[typing.Any]], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PopulateTemplateResponse: - """ - Retrieve the Prompt with the given ID, including the populated template. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request : typing.Dict[str, typing.Optional[typing.Any]] - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve to populate the template. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from to populate the template. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PopulateTemplateResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.populate(id='id', request={'key': 'value' - }, ) - asyncio.run(main()) - """ - _response = await self._raw_client.populate( - id, request=request, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListPrompts: - """ - Get a list of all the versions of a Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListPrompts - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - async def delete_prompt_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Prompt. 
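Version housekeeping pairs list_versions with delete_prompt_version; a sketch with placeholder IDs (the record structure of the returned ListPrompts is not shown in this file, so the result is only printed):

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Inspect all versions, including Evaluator aggregate results.
    versions = await client.prompts.list_versions(id="pr_123", evaluator_aggregates=True)
    print(versions)
    # Then remove a version that is no longer needed.
    await client.prompts.delete_prompt_version(id="pr_123", version_id="prv_456")


asyncio.run(main())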
- - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.delete_prompt_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete_prompt_version(id, version_id, request_options=request_options) - return _response.data - - async def patch_prompt_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Update the name or description of the Prompt version. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.patch_prompt_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.patch_prompt_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> PromptResponse: - """ - Deploy Prompt to an Environment. - - Set the deployed version for the specified Environment. This Prompt - will be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Prompt from the Environment. - - Remove the deployed version for the specified Environment. This Prompt - will no longer be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.remove_deployment(id='id', environment_id='environment_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> PromptResponse: - """ - Activate and deactivate Evaluators for monitoring the Prompt. - - An activated Evaluator will automatically be run on all new Logs - within the Prompt for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - async def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> str: - """ - Serialize a Prompt to the .prompt file format. - - Useful for storing the Prompt with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. 
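serialize and deserialize round-trip a Prompt through the .prompt text format, which suits version control; a sketch with a placeholder ID and file name:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Dump the deployed version to the .prompt file format...
    raw = await client.prompts.serialize(id="pr_123")
    with open("assistant.prompt", "w") as f:
        f.write(raw)
    # ...and parse the same text back into the version-defining fields.
    kernel = await client.prompts.deserialize(prompt=raw)
    print(kernel)


asyncio.run(main())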
- - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - str - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.serialize(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.serialize( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def deserialize( - self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None - ) -> PromptKernelRequest: - """ - Deserialize a Prompt from the .prompt file format. - - This returns a subset of the attributes required by a Prompt. - This subset is the bit that defines the Prompt version (e.g. with `model` and `temperature` etc) - - Parameters - ---------- - prompt : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PromptKernelRequest - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.prompts.deserialize(prompt='prompt', ) - asyncio.run(main()) - """ - _response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options) - return _response.data diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py deleted file mode 100644 index 5d12b08e..00000000 --- a/src/humanloop/prompts/raw_client.py +++ /dev/null @@ -1,3977 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
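The raw client defined in this file raises UnprocessableEntityError for 422 validation failures and ApiError for other failures, as each method body below shows. A sketch of catching both through the wrapped client; the placeholder ID and the exception attributes (status_code, body) are assumed from the constructor calls in this file:

import asyncio

from humanloop import AsyncHumanloop
from humanloop.core.api_error import ApiError
from humanloop.errors.unprocessable_entity_error import UnprocessableEntityError


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    try:
        await client.prompts.call(id="pr_123")
    except UnprocessableEntityError as e:
        # 422s carry the parsed HttpValidationError body.
        print("validation error:", e.body)
    except ApiError as e:
        print("request failed:", e.status_code, e.body)


asyncio.run(main())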
- -import contextlib -import datetime as dt -import json -import typing -from json.decoder import JSONDecodeError - -import httpx_sse -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.chat_message import ChatMessageParams -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.provider_api_keys import ProviderApiKeysParams -from ..requests.response_format import ResponseFormatParams -from ..requests.tool_function import ToolFunctionParams -from ..types.create_prompt_log_response import CreatePromptLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_sort_by import FileSortBy -from ..types.http_validation_error import HttpValidationError -from ..types.list_prompts import ListPrompts -from ..types.log_response import LogResponse -from ..types.log_status import LogStatus -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse -from ..types.populate_template_response import PopulateTemplateResponse -from ..types.prompt_call_response import PromptCallResponse -from ..types.prompt_call_stream_response import PromptCallStreamResponse -from ..types.prompt_kernel_request import PromptKernelRequest -from ..types.prompt_response import PromptResponse -from ..types.sort_order import SortOrder -from ..types.template_language import TemplateLanguage -from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams -from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams -from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams -from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams -from .requests.prompt_request_stop import PromptRequestStopParams -from .requests.prompt_request_template import PromptRequestTemplateParams -from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams -from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams -from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams -from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
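The OMIT sentinel above lets the generated methods tell "parameter not passed" apart from an explicit None, which must survive as JSON null. A minimal, self-contained illustration of the pattern:

import typing

OMIT = typing.cast(typing.Any, ...)  # the same Ellipsis-cast-to-Any sentinel


def build_json(**kwargs: typing.Any) -> dict:
    # Keys left at the sentinel are dropped entirely; explicit None is
    # kept and serializes to JSON null.
    return {k: v for k, v in kwargs.items() if v is not OMIT}


print(build_json(path="demo", id=OMIT, output=None))
# {'path': 'demo', 'output': None}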
- - -class RawPromptsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompt_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[CreatePromptLogResponse]: - """ - Log to a Prompt. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Prompt. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. 
- - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptLogRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace.
- - user : typing.Optional[str] - End-user ID related to the Log. - - prompt_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[CreatePromptLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "prompts/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "run_id": run_id, - "path": path, - "id": id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptLogRequestPromptParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompt_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreatePromptLogResponse, - construct_type( - type_=CreatePromptLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_log( - self, - id: str, - log_id: str, - *, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] 
= OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[LogResponse]: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - log_id : str - Unique identifier for the Log. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated with the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User-defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from.
- - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[LogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" - ), - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - @contextlib.contextmanager - def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: 
typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the to provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallStreamRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. 
Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
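Because this raw call_stream is a context manager whose HttpResponse wraps an iterator of parsed chunks (see the implementation after the docstring), direct consumption looks like the following sketch; it assumes the package also exports a synchronous Humanloop client and reaches the raw client through a private attribute, both for illustration only:

from humanloop import Humanloop  # assumption: synchronous client export

client = Humanloop(api_key="YOUR_API_KEY")
raw = client.prompts._raw_client  # assumption: private accessor, illustration only
with raw.call_stream(
    id="pr_123",  # placeholder ID
    messages=[{"role": "user", "content": "Hi"}],
) as http_response:
    # http_response.data is an iterator of parsed PromptCallStreamResponse chunks.
    for chunk in http_response.data:
        print(chunk)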
- - Yields - ------ - typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]] - - """ - with self._client_wrapper.httpx_client.stream( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_stream_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) as _response: - - def stream() -> HttpResponse[typing.Iterator[PromptCallStreamResponse]]: - try: - if 200 <= _response.status_code < 300: - - def _iter(): - _event_source = httpx_sse.EventSource(_response) - for _sse in _event_source.iter_sse(): - if _sse.data == None: - return - try: - yield typing.cast( - PromptCallStreamResponse, - construct_type( - type_=PromptCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except Exception: - pass - return - - return HttpResponse(response=_response, data=_iter()) - _response.read() - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError( - status_code=_response.status_code, headers=dict(_response.headers), body=_response.text - ) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - yield stream() - - def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_request_environment: typing.Optional[str] = OMIT, - save: 
typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PromptCallResponse]: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[PromptCallResponse] - - """ - _response = self._client_wrapper.httpx_client.request( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptCallResponse, - construct_type( - type_=PromptCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - 
construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[PromptResponse]: - """ - Get a list of all Prompts. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Prompts to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Prompt name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Prompts by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SyncPager[PromptResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "prompts", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataPromptResponse, - construct_type( - type_=PaginatedDataPromptResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[PromptRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[PromptRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, - linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PromptResponse]: - """ - Create a Prompt or update it with a new version if it already exists. - - Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Prompt - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[PromptRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[PromptRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0.
Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams] - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[ToolFunctionParams]] - The tool specification that the model can choose to call if Tool calling is supported. - - linked_tools : typing.Optional[typing.Sequence[str]] - The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Prompt version. Version names must be unique for a given Prompt. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - description : typing.Optional[str] - Description of the Prompt. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this prompt. - - readme : typing.Optional[str] - Long description of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
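To illustrate the upsert semantics described above, a minimal sketch, assuming the top-level `Humanloop` client exposes this method as `client.prompts.upsert` and accepts plain dicts for the template messages; the key, path and version name are hypothetical:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # hypothetical key

# Same path + same parameters -> existing version; changed parameters -> new version.
prompt = client.prompts.upsert(
    path="folder/my-prompt",  # hypothetical path
    model="gpt-4o",
    template=[{"role": "system", "content": "Answer questions about {{topic}}."}],
    temperature=0.7,
    version_name="baseline",  # must be unique within this Prompt, else 409 Conflict
)
print(prompt.id)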
- - Returns - ------- - HttpResponse[PromptResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "prompts", - method="POST", - json={ - "path": path, - "id": id, - "model": model, - "endpoint": endpoint, - "template": convert_and_respect_annotation_metadata( - object_=template, annotation=PromptRequestTemplateParams, direction="write" - ), - "template_language": template_language, - "provider": provider, - "max_tokens": max_tokens, - "temperature": temperature, - "top_p": top_p, - "stop": convert_and_respect_annotation_metadata( - object_=stop, annotation=PromptRequestStopParams, direction="write" - ), - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "other": other, - "seed": seed, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormatParams, direction="write" - ), - "reasoning_effort": convert_and_respect_annotation_metadata( - object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write" - ), - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" - ), - "linked_tools": linked_tools, - "attributes": attributes, - "version_name": version_name, - "version_description": version_description, - "description": description, - "tags": tags, - "readme": readme, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PromptResponse]: - """ - Retrieve the Prompt with the given ID. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
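A short sketch of targeting versions on retrieval, assuming the wrapper keeps this method as `client.prompts.get`; the ID is a hypothetical placeholder:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Deployed version by default; pin a specific one with `version_id` or `environment`.
prompt = client.prompts.get(id="pr_1234")  # hypothetical ID
staging = client.prompts.get(id="pr_1234", environment="staging")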
- - Returns - ------- - HttpResponse[PromptResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Prompt with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PromptResponse]: - """ - Move the Prompt to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - path : typing.Optional[str] - Path of the Prompt including the Prompt name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
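A sketch of renaming and relocating via this PATCH endpoint, assuming a `client.prompts.move` wrapper; identifiers are hypothetical:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Either move the Prompt to a new folder path, or just rename it in place.
client.prompts.move(id="pr_1234", path="archive/my-prompt")
client.prompts.move(id="pr_1234", name="my-prompt-v2")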
- - Returns - ------- - HttpResponse[PromptResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def populate( - self, - id: str, - *, - request: typing.Dict[str, typing.Optional[typing.Any]], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PopulateTemplateResponse]: - """ - Retrieve the Prompt with the given ID, including the populated template. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request : typing.Dict[str, typing.Optional[typing.Any]] - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve to populate the template. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from to populate the template. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[PopulateTemplateResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/populate", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json=request, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PopulateTemplateResponse, - construct_type( - type_=PopulateTemplateResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ListPrompts]: - """ - Get a list of all the versions of a Prompt. 
- - Parameters - ---------- - id : str - Unique identifier for Prompt. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ListPrompts] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListPrompts, - construct_type( - type_=ListPrompts, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_prompt_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Delete a version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def patch_prompt_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PromptResponse]: - """ - Update the name or description of the Prompt version. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
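Tying the version endpoints together, a sketch assuming the wrapper methods keep these names and that `ListPrompts` exposes the versions under a `records` field (an assumption); all IDs are hypothetical:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

versions = client.prompts.list_versions(id="pr_1234", evaluator_aggregates=True)
for version in versions.records:  # `records` field name is an assumption
    print(version.version_id)

# Give one version a human-readable name and description.
client.prompts.patch_prompt_version(
    id="pr_1234", version_id="prv_5678", name="baseline", description="First cut"
)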
- - Returns - ------- - HttpResponse[PromptResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[PromptResponse]: - """ - Deploy Prompt to an Environment. - - Set the deployed version for the specified Environment. This Prompt - will be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[PromptResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove deployed Prompt from the Environment. - - Remove the deployed version for the specified Environment. This Prompt - will no longer be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
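A sketch of the deployment lifecycle these two endpoints implement, assuming `client.prompts.set_deployment` and `client.prompts.remove_deployment` wrappers; all IDs are hypothetical:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Point an Environment at a specific version; calls in that Environment use it...
client.prompts.set_deployment(id="pr_1234", environment_id="env_prod", version_id="prv_5678")
# ...and later undeploy it from that Environment.
client.prompts.remove_deployment(id="pr_1234", environment_id="env_prod")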
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PromptResponse]: - """ - Activate and deactivate Evaluators for monitoring the Prompt. - - An activated Evaluator will automatically be run on all new Logs - within the Prompt for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
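A sketch of toggling monitoring Evaluators, assuming a `client.prompts.update_monitoring` wrapper and that the activate/deactivate union members accept an `evaluator_version_id` key (an assumption about the item types); IDs are hypothetical:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

client.prompts.update_monitoring(
    id="pr_1234",
    activate=[{"evaluator_version_id": "evv_1111"}],    # will run on all new Logs
    deactivate=[{"evaluator_version_id": "evv_2222"}],  # stops running on new Logs
)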
- - Returns - ------- - HttpResponse[PromptResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[str]: - """ - Serialize a Prompt to the .prompt file format. - - Useful for storing the Prompt with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[str] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/serialize", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=_response.text) # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def deserialize( - self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[PromptKernelRequest]: - """ - Deserialize a Prompt from the .prompt file format. 
- - This returns a subset of the attributes required by a Prompt. - This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.). - - Parameters - ---------- - prompt : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[PromptKernelRequest] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "prompts/deserialize", - method="POST", - json={ - "prompt": prompt, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptKernelRequest, - construct_type( - type_=PromptKernelRequest, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - - class AsyncRawPromptsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - run_id: typing.Optional[str] = OMIT, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompt_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[CreatePromptLogResponse]: - """ - Log to a Prompt.
- - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Prompt. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - run_id : typing.Optional[str] - Unique identifier for the Run to associate the Log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptLogRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptLogRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements.
- - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompt_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
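A sketch of the async logging flow this method implements, assuming the package exports an `AsyncHumanloop` client that wraps this raw method; the key, path and Prompt details are hypothetical:

import asyncio

from humanloop import AsyncHumanloop

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")  # hypothetical key
    log = await client.prompts.log(
        path="folder/my-prompt",                      # hypothetical path
        prompt={"model": "gpt-4o"},                   # minimal Prompt details (assumption)
        messages=[{"role": "user", "content": "Hi"}],
        output="Hello!",
        log_status="complete",                        # let monitoring Evaluators run now
    )
    print(log.id)

asyncio.run(main())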
- - Returns - ------- - AsyncHttpResponse[CreatePromptLogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "prompts/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "run_id": run_id, - "path": path, - "id": id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptLogRequestPromptParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompt_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreatePromptLogResponse, - construct_type( - type_=CreatePromptLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_log( - self, - id: str, - log_id: str, - *, - output_message: typing.Optional[ChatMessageParams] = OMIT, - prompt_tokens: typing.Optional[int] = OMIT, - reasoning_tokens: typing.Optional[int] = OMIT, - output_tokens: typing.Optional[int] = OMIT, - prompt_cost: typing.Optional[float] = OMIT, - output_cost: typing.Optional[float] = OMIT, - finish_reason: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[LogResponse]: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - log_id : str - Unique identifier for the Log. - - output_message : typing.Optional[ChatMessageParams] - The message returned by the provider. - - prompt_tokens : typing.Optional[int] - Number of tokens in the prompt used to generate the output. - - reasoning_tokens : typing.Optional[int] - Number of reasoning tokens used to generate the output. - - output_tokens : typing.Optional[int] - Number of tokens in the output generated by the model. - - prompt_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the prompt. - - output_cost : typing.Optional[float] - Cost in dollars associated to the tokens in the output. - - finish_reason : typing.Optional[str] - Reason the generation finished. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`.
If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[LogResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" - ), - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - @contextlib.asynccontextmanager - async def call_stream( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_stream_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = 
OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - when you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams] - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallStreamRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. 
This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_stream_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response. - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Yields - ------ - typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]] - - """ - async with self._client_wrapper.httpx_client.stream( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_stream_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) as _response: - - async def stream() -> AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]: - try: - if 200 <= _response.status_code < 300: - - async def _iter(): - _event_source = httpx_sse.EventSource(_response) - async for _sse in _event_source.aiter_sse(): - if _sse.data is None: - return - try: - yield typing.cast( - PromptCallStreamResponse, - construct_type( - type_=PromptCallStreamResponse, # type: ignore - 
object_=json.loads(_sse.data), - ), - ) - except Exception: - pass - return - - return AsyncHttpResponse(response=_response, data=_iter()) - await _response.aread() - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError( - status_code=_response.status_code, headers=dict(_response.headers), body=_response.text - ) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - yield await stream() - - async def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, - tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT, - prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - prompts_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, - num_samples: typing.Optional[int] = OMIT, - return_inputs: typing.Optional[bool] = OMIT, - logprobs: typing.Optional[int] = OMIT, - suffix: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PromptCallResponse]: - """ - Call a Prompt. - - Calling a Prompt calls the model provider before logging - the request, responses and metadata to Humanloop. - - You can use the query parameters `version_id` or `environment` to target - an existing version of the Prompt. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can pass in - Prompt details in the request body. In this case, we will check if the details correspond - to an existing version of the Prompt. If they do not, we will create a new version. This is helpful - when you are storing or deriving your Prompt details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - messages : typing.Optional[typing.Sequence[ChatMessageParams]] - The messages passed to the provider chat endpoint. - - tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams] - Controls how the model uses tools. 
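# Hypothetical usage sketch for the async `call_stream` method whose implementation
# ends just above. It assumes the high-level AsyncHumanloop client exported by this
# SDK release wraps the raw client and flattens the AsyncHttpResponse into a plain
# async iterator of PromptCallStreamResponse chunks; the path and message are invented.
import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")  # placeholder key
    # Each chunk is one PromptCallStreamResponse parsed from a server-sent event.
    async for chunk in client.prompts.call_stream(
        path="examples/support-bot",  # hypothetical Prompt path
        messages=[{"role": "user", "content": "Stream me a haiku."}],
    ):
        print(chunk)


asyncio.run(main())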
The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - - prompt : typing.Optional[PromptsCallRequestPromptParams] - The Prompt configuration to use. Two formats are supported: - - An object representing the details of the Prompt configuration - - A string representing the raw contents of a .prompt file - A new Prompt version will be created if the provided details do not match any existing version. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - prompts_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated with. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - provider_api_keys : typing.Optional[ProviderApiKeysParams] - API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - - num_samples : typing.Optional[int] - The number of generations. - - return_inputs : typing.Optional[bool] - Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - - logprobs : typing.Optional[int] - Include the log probabilities of the top n tokens in the provider_response. - - suffix : typing.Optional[str] - The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[PromptCallResponse] - - """ - _response = await self._client_wrapper.httpx_client.request( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptCallResponse, - construct_type( - type_=PromptCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[PromptResponse]: - """ - Get a list of all Prompts. - - Parameters - ---------- - page : typing.Optional[int] - Page number for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Prompts to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Prompt name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Prompts by - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
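# Hypothetical sketch for the non-streaming `call` method completed above: the same
# request shape as `call_stream`, but the full PromptCallResponse arrives at once.
# Assumes an AsyncHumanloop `client` as in the earlier sketch; values are invented.
from humanloop import AsyncHumanloop


async def example_call(client: AsyncHumanloop) -> None:
    response = await client.prompts.call(
        path="examples/support-bot",  # hypothetical Prompt path
        messages=[{"role": "user", "content": "How do I reset my password?"}],
        num_samples=1,
        save=True,  # persist request/response payloads on Humanloop
    )
    print(response)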
- - Returns - ------- - AsyncPager[PromptResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = await self._client_wrapper.httpx_client.request( - "prompts", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataPromptResponse, - construct_type( - type_=PaginatedDataPromptResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - - async def _get_next(): - return await self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - return AsyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def upsert( - self, - *, - model: str, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - endpoint: typing.Optional[ModelEndpoints] = OMIT, - template: typing.Optional[PromptRequestTemplateParams] = OMIT, - template_language: typing.Optional[TemplateLanguage] = OMIT, - provider: typing.Optional[ModelProviders] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - temperature: typing.Optional[float] = OMIT, - top_p: typing.Optional[float] = OMIT, - stop: typing.Optional[PromptRequestStopParams] = OMIT, - presence_penalty: typing.Optional[float] = OMIT, - frequency_penalty: typing.Optional[float] = OMIT, - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - seed: typing.Optional[int] = OMIT, - response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT, - tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, - linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - tags: typing.Optional[typing.Sequence[str]] = OMIT, - readme: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PromptResponse]: - """ - Create a Prompt or update it with a new version if it already exists. - - Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Prompt - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - model : str - The model instance used, e.g. `gpt-4`. 
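# Hypothetical pagination sketch for the `list` method completed above: the returned
# AsyncPager fetches `page + 1` lazily via its `_get_next` callback, so a single
# async-for walks every matching Prompt. Filter values are invented.
from humanloop import AsyncHumanloop


async def example_list(client: AsyncHumanloop) -> None:
    pager = await client.prompts.list(size=50, name="support")
    async for prompt in pager:  # transparently crosses page boundaries
        print(prompt.id, prompt.path)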
See [supported models](https://humanloop.com/docs/reference/supported-models) - - path : typing.Optional[str] - Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Prompt. - - endpoint : typing.Optional[ModelEndpoints] - The provider model endpoint used. - - template : typing.Optional[PromptRequestTemplateParams] - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - - template_language : typing.Optional[TemplateLanguage] - The template language to use for rendering the template. - - provider : typing.Optional[ModelProviders] - The company providing the underlying model service. - - max_tokens : typing.Optional[int] - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - - temperature : typing.Optional[float] - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - - top_p : typing.Optional[float] - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - stop : typing.Optional[PromptRequestStopParams] - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - - presence_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - - frequency_penalty : typing.Optional[float] - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - - other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Other parameter values to be passed to the provider call. - - seed : typing.Optional[int] - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - - response_format : typing.Optional[ResponseFormatParams] - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - - reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams] - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - - tools : typing.Optional[typing.Sequence[ToolFunctionParams]] - The tool specification that the model can choose to call if Tool calling is supported. - - linked_tools : typing.Optional[typing.Sequence[str]] - The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Prompt. 
Helpful to separate Prompt versions from each other with details on how they were created or used. - - version_name : typing.Optional[str] - Unique name for the Prompt version. Version names must be unique for a given Prompt. - - version_description : typing.Optional[str] - Description of the version, e.g., the changes made in this version. - - description : typing.Optional[str] - Description of the Prompt. - - tags : typing.Optional[typing.Sequence[str]] - List of tags associated with this prompt. - - readme : typing.Optional[str] - Long description of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[PromptResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "prompts", - method="POST", - json={ - "path": path, - "id": id, - "model": model, - "endpoint": endpoint, - "template": convert_and_respect_annotation_metadata( - object_=template, annotation=PromptRequestTemplateParams, direction="write" - ), - "template_language": template_language, - "provider": provider, - "max_tokens": max_tokens, - "temperature": temperature, - "top_p": top_p, - "stop": convert_and_respect_annotation_metadata( - object_=stop, annotation=PromptRequestStopParams, direction="write" - ), - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "other": other, - "seed": seed, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormatParams, direction="write" - ), - "reasoning_effort": convert_and_respect_annotation_metadata( - object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write" - ), - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" - ), - "linked_tools": linked_tools, - "attributes": attributes, - "version_name": version_name, - "version_description": version_description, - "description": description, - "tags": tags, - "readme": readme, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PromptResponse]: - """ - Retrieve the Prompt with the given ID. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. 
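# Hypothetical sketch for the `upsert` method completed above: matching parameters
# resolve to the existing version, changed parameters create a new one, and a
# duplicate `version_name` yields a 409 Conflict. Assumes an AsyncHumanloop `client`;
# the path, template, and version name are invented.
from humanloop import AsyncHumanloop


async def example_upsert(client: AsyncHumanloop) -> None:
    prompt = await client.prompts.upsert(
        path="examples/support-bot",  # hypothetical Prompt path
        model="gpt-4o",
        endpoint="chat",
        template=[{"role": "system", "content": "You support users of {{product}}."}],
        temperature=0.7,
        version_name="support-v1",  # must be unique within this Prompt
    )
    print(prompt.version_id)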
- - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[PromptResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Prompt with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PromptResponse]: - """ - Move the Prompt to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - path : typing.Optional[str] - Path of the Prompt including the Prompt name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
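# Hypothetical sketch for the `get` and `delete` methods completed above. By default
# `get` returns the deployed version; `version_id` or `environment` selects another.
# The IDs and environment name are invented.
from humanloop import AsyncHumanloop


async def example_get_then_delete(client: AsyncHumanloop) -> None:
    prompt = await client.prompts.get(id="pr_1234")  # hypothetical Prompt ID
    staging = await client.prompts.get(id="pr_1234", environment="staging")
    print(prompt.path, staging.version_id)
    await client.prompts.delete(id="pr_1234")  # removes the Prompt entirely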
- - Returns - ------- - AsyncHttpResponse[PromptResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def populate( - self, - id: str, - *, - request: typing.Dict[str, typing.Optional[typing.Any]], - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PopulateTemplateResponse]: - """ - Retrieve the Prompt with the given ID, including the populated template. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request : typing.Dict[str, typing.Optional[typing.Any]] - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve to populate the template. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from to populate the template. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
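# Hypothetical sketch for the `move` method completed above: PATCHing `path` or
# `name` relocates or renames the Prompt without creating a new version. Invented values.
from humanloop import AsyncHumanloop


async def example_move(client: AsyncHumanloop) -> None:
    moved = await client.prompts.move(id="pr_1234", path="archive/support-bot")
    print(moved.path)  # now "archive/support-bot"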
- - Returns - ------- - AsyncHttpResponse[PopulateTemplateResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/populate", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json=request, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PopulateTemplateResponse, - construct_type( - type_=PopulateTemplateResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ListPrompts]: - """ - Get a list of all the versions of a Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[ListPrompts] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListPrompts, - construct_type( - type_=ListPrompts, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_prompt_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete a version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
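# Hypothetical sketch for the `populate` and `list_versions` methods completed above.
# `populate` renders the stored template with the given inputs; the `request` dict is
# keyed by the `{{variable}}` names in the template. IDs and inputs are invented.
from humanloop import AsyncHumanloop


async def example_populate_and_versions(client: AsyncHumanloop) -> None:
    populated = await client.prompts.populate(
        id="pr_1234",
        request={"product": "Acme CRM"},  # hypothetical template input
    )
    print(populated)
    versions = await client.prompts.list_versions(id="pr_1234", evaluator_aggregates=True)
    print(versions)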
- - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def patch_prompt_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PromptResponse]: - """ - Update the name or description of the Prompt version. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : str - Unique identifier for the specific version of the Prompt. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[PromptResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[PromptResponse]: - """ - Deploy Prompt to an Environment. - - Set the deployed version for the specified Environment. This Prompt - will be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
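# Hypothetical sketch for the two version-management methods completed above:
# renaming or re-describing one version, then deleting another. IDs are invented.
from humanloop import AsyncHumanloop


async def example_manage_versions(client: AsyncHumanloop) -> None:
    await client.prompts.patch_prompt_version(
        id="pr_1234",
        version_id="prv_abc",  # hypothetical version ID
        name="support-v2",
        description="Tightened the system message.",
    )
    await client.prompts.delete_prompt_version(id="pr_1234", version_id="prv_old")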
- - Returns - ------- - AsyncHttpResponse[PromptResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove deployed Prompt from the Environment. - - Remove the deployed version for the specified Environment. This Prompt - will no longer be used for calls made to the Prompt in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
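# Hypothetical sketch for the `set_deployment` and `remove_deployment` methods
# completed above: pin a version to an Environment, then clear it. IDs are invented.
from humanloop import AsyncHumanloop


async def example_deployments(client: AsyncHumanloop) -> None:
    await client.prompts.set_deployment(
        id="pr_1234",
        environment_id="env_prod",  # hypothetical Environment ID
        version_id="prv_abc",
    )
    await client.prompts.remove_deployment(id="pr_1234", environment_id="env_prod")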
- - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PromptResponse]: - """ - Activate and deactivate Evaluators for monitoring the Prompt. - - An activated Evaluator will automatically be run on all new Logs - within the Prompt for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
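# Hypothetical sketch for the `list_environments` method completed above: each
# FileEnvironmentResponse pairs an Environment with whatever version of this Prompt
# is deployed there. The ID is invented.
from humanloop import AsyncHumanloop


async def example_environments(client: AsyncHumanloop) -> None:
    environments = await client.prompts.list_environments(id="pr_1234")
    for environment in environments:
        print(environment)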
- - Returns - ------- - AsyncHttpResponse[PromptResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def serialize( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[str]: - """ - Serialize a Prompt to the .prompt file format. - - Useful for storing the Prompt with your code in a version control system, - or for editing with an AI tool. - - By default, the deployed version of the Prompt is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Prompt. - - Parameters - ---------- - id : str - Unique identifier for Prompt. - - version_id : typing.Optional[str] - A specific Version ID of the Prompt to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
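# Hypothetical sketch for the `update_monitoring` method completed above. The exact
# shape of the activate/deactivate items is an assumption inferred from the request
# parameter names; the evaluator version IDs are invented.
from humanloop import AsyncHumanloop


async def example_monitoring(client: AsyncHumanloop) -> None:
    await client.prompts.update_monitoring(
        id="pr_1234",
        activate=[{"evaluator_version_id": "evv_abc"}],  # assumed item shape
        deactivate=[{"evaluator_version_id": "evv_old"}],
    )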
- - Returns - ------- - AsyncHttpResponse[str] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/serialize", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=_response.text) # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def deserialize( - self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[PromptKernelRequest]: - """ - Deserialize a Prompt from the .prompt file format. - - This returns a subset of the attributes required by a Prompt. - This subset defines the Prompt version (e.g. `model`, `temperature`, etc.). - - Parameters - ---------- - prompt : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[PromptKernelRequest] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - "prompts/deserialize", - method="POST", - json={ - "prompt": prompt, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - PromptKernelRequest, - construct_type( - type_=PromptKernelRequest, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py deleted file mode 100644 index 67f6233e..00000000 --- a/src/humanloop/prompts/requests/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
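# Hypothetical round-trip sketch for the `serialize` and `deserialize` methods
# completed above: dump a Prompt to the .prompt text format (e.g. to commit alongside
# code), then parse it back into the version-defining PromptKernelRequest fields.
# The ID is invented.
from humanloop import AsyncHumanloop


async def example_roundtrip(client: AsyncHumanloop) -> None:
    raw = await client.prompts.serialize(id="pr_1234")  # .prompt contents as a string
    kernel = await client.prompts.deserialize(prompt=raw)
    print(kernel.model, kernel.temperature)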
- -# isort: skip_file - -from .prompt_log_request_prompt import PromptLogRequestPromptParams -from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams -from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams -from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams -from .prompt_request_stop import PromptRequestStopParams -from .prompt_request_template import PromptRequestTemplateParams -from .prompts_call_request_prompt import PromptsCallRequestPromptParams -from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams -from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams -from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams - -__all__ = [ - "PromptLogRequestPromptParams", - "PromptLogRequestToolChoiceParams", - "PromptLogUpdateRequestToolChoiceParams", - "PromptRequestReasoningEffortParams", - "PromptRequestStopParams", - "PromptRequestTemplateParams", - "PromptsCallRequestPromptParams", - "PromptsCallRequestToolChoiceParams", - "PromptsCallStreamRequestPromptParams", - "PromptsCallStreamRequestToolChoiceParams", -] diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py deleted file mode 100644 index 18417e47..00000000 --- a/src/humanloop/prompts/requests/prompt_log_request_prompt.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.prompt_kernel_request import PromptKernelRequestParams - -PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str] diff --git a/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py b/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py deleted file mode 100644 index eb1a3a0d..00000000 --- a/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.tool_choice import ToolChoiceParams - -PromptLogRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py b/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py deleted file mode 100644 index 18598c0a..00000000 --- a/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.tool_choice import ToolChoiceParams - -PromptLogUpdateRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py deleted file mode 100644 index c40a1fdd..00000000 --- a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort - -PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/prompts/requests/prompt_request_stop.py b/src/humanloop/prompts/requests/prompt_request_stop.py deleted file mode 100644 index ed7fd9c7..00000000 --- a/src/humanloop/prompts/requests/prompt_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PromptRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/prompts/requests/prompt_request_template.py b/src/humanloop/prompts/requests/prompt_request_template.py deleted file mode 100644 index 51e6905d..00000000 --- a/src/humanloop/prompts/requests/prompt_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.chat_message import ChatMessageParams - -PromptRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py deleted file mode 100644 index c9ef087f..00000000 --- a/src/humanloop/prompts/requests/prompts_call_request_prompt.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.prompt_kernel_request import PromptKernelRequestParams - -PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str] diff --git a/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py b/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py deleted file mode 100644 index 9a2e39ad..00000000 --- a/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.tool_choice import ToolChoiceParams - -PromptsCallRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py deleted file mode 100644 index f27fc93b..00000000 --- a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...requests.prompt_kernel_request import PromptKernelRequestParams - -PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str] diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py b/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py deleted file mode 100644 index d8e537d8..00000000 --- a/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from ...requests.tool_choice import ToolChoiceParams - -PromptsCallStreamRequestToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py deleted file mode 100644 index 964060c2..00000000 --- a/src/humanloop/prompts/types/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .prompt_log_request_prompt import PromptLogRequestPrompt -from .prompt_log_request_tool_choice import PromptLogRequestToolChoice -from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice -from .prompt_request_reasoning_effort import PromptRequestReasoningEffort -from .prompt_request_stop import PromptRequestStop -from .prompt_request_template import PromptRequestTemplate -from .prompts_call_request_prompt import PromptsCallRequestPrompt -from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice -from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt -from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice - -__all__ = [ - "PromptLogRequestPrompt", - "PromptLogRequestToolChoice", - "PromptLogUpdateRequestToolChoice", - "PromptRequestReasoningEffort", - "PromptRequestStop", - "PromptRequestTemplate", - "PromptsCallRequestPrompt", - "PromptsCallRequestToolChoice", - "PromptsCallStreamRequestPrompt", - "PromptsCallStreamRequestToolChoice", -] diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py deleted file mode 100644 index 6b1c1c5e..00000000 --- a/src/humanloop/prompts/types/prompt_log_request_prompt.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.prompt_kernel_request import PromptKernelRequest - -PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str] diff --git a/src/humanloop/prompts/types/prompt_log_request_tool_choice.py b/src/humanloop/prompts/types/prompt_log_request_tool_choice.py deleted file mode 100644 index f7c0c6e9..00000000 --- a/src/humanloop/prompts/types/prompt_log_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.tool_choice import ToolChoice - -PromptLogRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py b/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py deleted file mode 100644 index 0edb325c..00000000 --- a/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
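# A sketch of the values accepted by the ToolChoice union aliases being deleted here,
# as described by the tool_choice docstrings earlier in this patch: one of the string
# literals, or a ToolChoice-shaped object naming a function. The tool name is invented.
tool_choice_auto = "auto"          # model decides whether to call a tool
tool_choice_none = "none"          # model must answer with a plain message
tool_choice_required = "required"  # model must call at least one tool
tool_choice_forced = {"type": "function", "function": {"name": "get_weather"}}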
- -import typing - -from ...types.tool_choice import ToolChoice - -PromptLogUpdateRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py deleted file mode 100644 index 89eefb37..00000000 --- a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort - -PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/prompts/types/prompt_request_stop.py b/src/humanloop/prompts/types/prompt_request_stop.py deleted file mode 100644 index e2f6d535..00000000 --- a/src/humanloop/prompts/types/prompt_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PromptRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/prompts/types/prompt_request_template.py b/src/humanloop/prompts/types/prompt_request_template.py deleted file mode 100644 index 0e3dc1b4..00000000 --- a/src/humanloop/prompts/types/prompt_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.chat_message import ChatMessage - -PromptRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py deleted file mode 100644 index 98cb80c3..00000000 --- a/src/humanloop/prompts/types/prompts_call_request_prompt.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.prompt_kernel_request import PromptKernelRequest - -PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str] diff --git a/src/humanloop/prompts/types/prompts_call_request_tool_choice.py b/src/humanloop/prompts/types/prompts_call_request_tool_choice.py deleted file mode 100644 index 8fc2cad0..00000000 --- a/src/humanloop/prompts/types/prompts_call_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.tool_choice import ToolChoice - -PromptsCallRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py deleted file mode 100644 index c623bcae..00000000 --- a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ...types.prompt_kernel_request import PromptKernelRequest - -PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str] diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py b/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py deleted file mode 100644 index 67b9e533..00000000 --- a/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
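`PromptRequestReasoningEffort` (and its Params twin above) is `Union[OpenAiReasoningEffort, int]`: per the `reasoning_effort` docstrings elsewhere in this patch, OpenAI reasoning models take an effort enum while Anthropic models take an integer token budget. A sketch of dispatching on that union; the literal enum values and the provider argument names below are assumptions, not taken from this patch:

    import typing

    # Stand-in for OpenAiReasoningEffort; the literal values are assumed.
    OpenAiReasoningEffort = typing.Literal["low", "medium", "high"]
    ReasoningEffort = typing.Union[OpenAiReasoningEffort, int]

    def to_provider_args(effort: ReasoningEffort) -> dict:
        if isinstance(effort, int):
            # Anthropic-style: an explicit maximum thinking-token budget.
            return {"max_thinking_tokens": effort}
        # OpenAI-style: a named effort level.
        return {"reasoning_effort": effort}

    print(to_provider_args("medium"))
    print(to_provider_args(2048))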
- -import typing - -from ...types.tool_choice import ToolChoice - -PromptsCallStreamRequestToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/py.typed b/src/humanloop/py.typed deleted file mode 100644 index e69de29b..00000000 diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py deleted file mode 100644 index a95f70ac..00000000 --- a/src/humanloop/requests/__init__.py +++ /dev/null @@ -1,339 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - -from .agent_call_response import AgentCallResponseParams -from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams -from .agent_call_stream_response import AgentCallStreamResponseParams -from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams -from .agent_config_response import AgentConfigResponseParams -from .agent_continue_call_response import AgentContinueCallResponseParams -from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams -from .agent_continue_call_stream_response import AgentContinueCallStreamResponseParams -from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams -from .agent_inline_tool import AgentInlineToolParams -from .agent_kernel_request import AgentKernelRequestParams -from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams -from .agent_kernel_request_stop import AgentKernelRequestStopParams -from .agent_kernel_request_template import AgentKernelRequestTemplateParams -from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams -from .agent_linked_file_request import AgentLinkedFileRequestParams -from .agent_linked_file_response import AgentLinkedFileResponseParams -from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams -from .agent_log_response import AgentLogResponseParams -from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams -from .agent_log_stream_response import AgentLogStreamResponseParams -from .agent_response import AgentResponseParams -from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams -from .agent_response_stop import AgentResponseStopParams -from .agent_response_template import AgentResponseTemplateParams -from .agent_response_tools_item import AgentResponseToolsItemParams -from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams -from .anthropic_thinking_content import AnthropicThinkingContentParams -from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams -from .chat_message import ChatMessageParams -from .chat_message_content import ChatMessageContentParams -from .chat_message_content_item import ChatMessageContentItemParams -from .chat_message_thinking_item import ChatMessageThinkingItemParams -from .code_evaluator_request import CodeEvaluatorRequestParams -from .create_agent_log_response import CreateAgentLogResponseParams -from .create_datapoint_request import CreateDatapointRequestParams -from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams -from .create_evaluator_log_response import CreateEvaluatorLogResponseParams -from .create_flow_log_response import CreateFlowLogResponseParams -from .create_prompt_log_response import CreatePromptLogResponseParams -from .create_tool_log_response import 
CreateToolLogResponseParams -from .dashboard_configuration import DashboardConfigurationParams -from .datapoint_response import DatapointResponseParams -from .datapoint_response_target_value import DatapointResponseTargetValueParams -from .dataset_response import DatasetResponseParams -from .directory_response import DirectoryResponseParams -from .directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponseParams -from .directory_with_parents_and_children_response_files_item import ( - DirectoryWithParentsAndChildrenResponseFilesItemParams, -) -from .environment_response import EnvironmentResponseParams -from .evaluatee_request import EvaluateeRequestParams -from .evaluatee_response import EvaluateeResponseParams -from .evaluation_evaluator_response import EvaluationEvaluatorResponseParams -from .evaluation_log_response import EvaluationLogResponseParams -from .evaluation_response import EvaluationResponseParams -from .evaluation_run_response import EvaluationRunResponseParams -from .evaluation_runs_response import EvaluationRunsResponseParams -from .evaluation_stats import EvaluationStatsParams -from .evaluator_activation_deactivation_request import EvaluatorActivationDeactivationRequestParams -from .evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from .evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from .evaluator_aggregate import EvaluatorAggregateParams -from .evaluator_config_response import EvaluatorConfigResponseParams -from .evaluator_file_id import EvaluatorFileIdParams -from .evaluator_file_path import EvaluatorFilePathParams -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams -from .evaluator_log_response import EvaluatorLogResponseParams -from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams -from .evaluator_response import EvaluatorResponseParams -from .evaluator_response_spec import EvaluatorResponseSpecParams -from .evaluator_version_id import EvaluatorVersionIdParams -from .external_evaluator_request import ExternalEvaluatorRequestParams -from .file_environment_response import FileEnvironmentResponseParams -from .file_environment_response_file import FileEnvironmentResponseFileParams -from .file_environment_variable_request import FileEnvironmentVariableRequestParams -from .file_id import FileIdParams -from .file_path import FilePathParams -from .file_request import FileRequestParams -from .flow_kernel_request import FlowKernelRequestParams -from .flow_log_response import FlowLogResponseParams -from .flow_response import FlowResponseParams -from .function_tool import FunctionToolParams -from .function_tool_choice import FunctionToolChoiceParams -from .http_validation_error import HttpValidationErrorParams -from .human_evaluator_request import HumanEvaluatorRequestParams -from .image_chat_content import ImageChatContentParams -from .image_url import ImageUrlParams -from .input_response import InputResponseParams -from .linked_file_request import LinkedFileRequestParams -from .linked_tool_response import LinkedToolResponseParams -from .list_agents import ListAgentsParams -from .list_datasets import ListDatasetsParams -from .list_evaluators import ListEvaluatorsParams -from .list_flows import ListFlowsParams -from .list_prompts import 
ListPromptsParams -from .list_tools import ListToolsParams -from .llm_evaluator_request import LlmEvaluatorRequestParams -from .log_response import LogResponseParams -from .log_stream_response import LogStreamResponseParams -from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams -from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams -from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams -from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams -from .overall_stats import OverallStatsParams -from .paginated_data_agent_response import PaginatedDataAgentResponseParams -from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams -from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams -from .paginated_data_flow_response import PaginatedDataFlowResponseParams -from .paginated_data_log_response import PaginatedDataLogResponseParams -from .paginated_data_prompt_response import PaginatedDataPromptResponseParams -from .paginated_data_tool_response import PaginatedDataToolResponseParams -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, -) -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, -) -from .paginated_datapoint_response import PaginatedDatapointResponseParams -from .paginated_dataset_response import PaginatedDatasetResponseParams -from .paginated_evaluation_response import PaginatedEvaluationResponseParams -from .populate_template_response import PopulateTemplateResponseParams -from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams -from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams -from .populate_template_response_stop import PopulateTemplateResponseStopParams -from .populate_template_response_template import PopulateTemplateResponseTemplateParams -from .prompt_call_log_response import PromptCallLogResponseParams -from .prompt_call_response import PromptCallResponseParams -from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams -from .prompt_call_stream_response import PromptCallStreamResponseParams -from .prompt_kernel_request import PromptKernelRequestParams -from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams -from .prompt_kernel_request_stop import PromptKernelRequestStopParams -from .prompt_kernel_request_template import PromptKernelRequestTemplateParams -from .prompt_log_response import PromptLogResponseParams -from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams -from .prompt_response import PromptResponseParams -from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams -from .prompt_response_stop import PromptResponseStopParams -from .prompt_response_template import PromptResponseTemplateParams -from .provider_api_keys import ProviderApiKeysParams -from .response_format import ResponseFormatParams -from .run_stats_response import RunStatsResponseParams -from 
.run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams -from .run_version_response import RunVersionResponseParams -from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams -from .text_chat_content import TextChatContentParams -from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams -from .tool_call import ToolCallParams -from .tool_call_response import ToolCallResponseParams -from .tool_choice import ToolChoiceParams -from .tool_function import ToolFunctionParams -from .tool_kernel_request import ToolKernelRequestParams -from .tool_log_response import ToolLogResponseParams -from .tool_response import ToolResponseParams -from .update_version_request import UpdateVersionRequestParams -from .validation_error import ValidationErrorParams -from .validation_error_loc_item import ValidationErrorLocItemParams -from .version_deployment_response import VersionDeploymentResponseParams -from .version_deployment_response_file import VersionDeploymentResponseFileParams -from .version_id import VersionIdParams -from .version_id_response import VersionIdResponseParams -from .version_id_response_version import VersionIdResponseVersionParams -from .version_reference_response import VersionReferenceResponseParams -from .version_stats_response import VersionStatsResponseParams -from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams - -__all__ = [ - "AgentCallResponseParams", - "AgentCallResponseToolChoiceParams", - "AgentCallStreamResponseParams", - "AgentCallStreamResponsePayloadParams", - "AgentConfigResponseParams", - "AgentContinueCallResponseParams", - "AgentContinueCallResponseToolChoiceParams", - "AgentContinueCallStreamResponseParams", - "AgentContinueCallStreamResponsePayloadParams", - "AgentInlineToolParams", - "AgentKernelRequestParams", - "AgentKernelRequestReasoningEffortParams", - "AgentKernelRequestStopParams", - "AgentKernelRequestTemplateParams", - "AgentKernelRequestToolsItemParams", - "AgentLinkedFileRequestParams", - "AgentLinkedFileResponseFileParams", - "AgentLinkedFileResponseParams", - "AgentLogResponseParams", - "AgentLogResponseToolChoiceParams", - "AgentLogStreamResponseParams", - "AgentResponseParams", - "AgentResponseReasoningEffortParams", - "AgentResponseStopParams", - "AgentResponseTemplateParams", - "AgentResponseToolsItemParams", - "AnthropicRedactedThinkingContentParams", - "AnthropicThinkingContentParams", - "BooleanEvaluatorStatsResponseParams", - "ChatMessageContentItemParams", - "ChatMessageContentParams", - "ChatMessageParams", - "ChatMessageThinkingItemParams", - "CodeEvaluatorRequestParams", - "CreateAgentLogResponseParams", - "CreateDatapointRequestParams", - "CreateDatapointRequestTargetValueParams", - "CreateEvaluatorLogResponseParams", - "CreateFlowLogResponseParams", - "CreatePromptLogResponseParams", - "CreateToolLogResponseParams", - "DashboardConfigurationParams", - "DatapointResponseParams", - "DatapointResponseTargetValueParams", - "DatasetResponseParams", - "DirectoryResponseParams", - "DirectoryWithParentsAndChildrenResponseFilesItemParams", - "DirectoryWithParentsAndChildrenResponseParams", - "EnvironmentResponseParams", - "EvaluateeRequestParams", - "EvaluateeResponseParams", - "EvaluationEvaluatorResponseParams", - "EvaluationLogResponseParams", - "EvaluationResponseParams", - "EvaluationRunResponseParams", - "EvaluationRunsResponseParams", - "EvaluationStatsParams", - 
"EvaluatorActivationDeactivationRequestActivateItemParams", - "EvaluatorActivationDeactivationRequestDeactivateItemParams", - "EvaluatorActivationDeactivationRequestParams", - "EvaluatorAggregateParams", - "EvaluatorConfigResponseParams", - "EvaluatorFileIdParams", - "EvaluatorFilePathParams", - "EvaluatorJudgmentNumberLimitParams", - "EvaluatorJudgmentOptionResponseParams", - "EvaluatorLogResponseJudgmentParams", - "EvaluatorLogResponseParams", - "EvaluatorResponseParams", - "EvaluatorResponseSpecParams", - "EvaluatorVersionIdParams", - "ExternalEvaluatorRequestParams", - "FileEnvironmentResponseFileParams", - "FileEnvironmentResponseParams", - "FileEnvironmentVariableRequestParams", - "FileIdParams", - "FilePathParams", - "FileRequestParams", - "FlowKernelRequestParams", - "FlowLogResponseParams", - "FlowResponseParams", - "FunctionToolChoiceParams", - "FunctionToolParams", - "HttpValidationErrorParams", - "HumanEvaluatorRequestParams", - "ImageChatContentParams", - "ImageUrlParams", - "InputResponseParams", - "LinkedFileRequestParams", - "LinkedToolResponseParams", - "ListAgentsParams", - "ListDatasetsParams", - "ListEvaluatorsParams", - "ListFlowsParams", - "ListPromptsParams", - "ListToolsParams", - "LlmEvaluatorRequestParams", - "LogResponseParams", - "LogStreamResponseParams", - "MonitoringEvaluatorEnvironmentRequestParams", - "MonitoringEvaluatorResponseParams", - "MonitoringEvaluatorVersionRequestParams", - "NumericEvaluatorStatsResponseParams", - "OverallStatsParams", - "PaginatedDataAgentResponseParams", - "PaginatedDataEvaluationLogResponseParams", - "PaginatedDataEvaluatorResponseParams", - "PaginatedDataFlowResponseParams", - "PaginatedDataLogResponseParams", - "PaginatedDataPromptResponseParams", - "PaginatedDataToolResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", - "PaginatedDatapointResponseParams", - "PaginatedDatasetResponseParams", - "PaginatedEvaluationResponseParams", - "PopulateTemplateResponseParams", - "PopulateTemplateResponsePopulatedTemplateParams", - "PopulateTemplateResponseReasoningEffortParams", - "PopulateTemplateResponseStopParams", - "PopulateTemplateResponseTemplateParams", - "PromptCallLogResponseParams", - "PromptCallResponseParams", - "PromptCallResponseToolChoiceParams", - "PromptCallStreamResponseParams", - "PromptKernelRequestParams", - "PromptKernelRequestReasoningEffortParams", - "PromptKernelRequestStopParams", - "PromptKernelRequestTemplateParams", - "PromptLogResponseParams", - "PromptLogResponseToolChoiceParams", - "PromptResponseParams", - "PromptResponseReasoningEffortParams", - "PromptResponseStopParams", - "PromptResponseTemplateParams", - "ProviderApiKeysParams", - "ResponseFormatParams", - "RunStatsResponseEvaluatorStatsItemParams", - "RunStatsResponseParams", - "RunVersionResponseParams", - "SelectEvaluatorStatsResponseParams", - "TextChatContentParams", - "TextEvaluatorStatsResponseParams", - "ToolCallParams", - "ToolCallResponseParams", - "ToolChoiceParams", - "ToolFunctionParams", - "ToolKernelRequestParams", - "ToolLogResponseParams", - "ToolResponseParams", - "UpdateVersionRequestParams", - "ValidationErrorLocItemParams", - "ValidationErrorParams", - "VersionDeploymentResponseFileParams", - "VersionDeploymentResponseParams", - "VersionIdParams", - "VersionIdResponseParams", - "VersionIdResponseVersionParams", - 
"VersionReferenceResponseParams", - "VersionStatsResponseEvaluatorVersionStatsItemParams", - "VersionStatsResponseParams", -] diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py deleted file mode 100644 index 1e72ba93..00000000 --- a/src/humanloop/requests/agent_call_response.py +++ /dev/null @@ -1,202 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams -from .agent_response import AgentResponseParams -from .chat_message import ChatMessageParams -from .evaluator_log_response import EvaluatorLogResponseParams -from .log_response import LogResponseParams - - -class AgentCallResponseParams(typing_extensions.TypedDict): - """ - Response model for a Agent call. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the provider. - """ - - prompt_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing_extensions.NotRequired[int] - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing_extensions.NotRequired[str] - """ - Reason the generation finished. - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams] - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - agent: AgentResponseParams - """ - Agent that generated the Log. - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. 
- """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] - """ - Logs nested under this Log in the Trace. - """ - - previous_agent_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. - """ diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py deleted file mode 100644 index 906cdf4b..00000000 --- a/src/humanloop/requests/agent_call_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .tool_choice import ToolChoiceParams - -AgentCallResponseToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py deleted file mode 100644 index 9bc8d29c..00000000 --- a/src/humanloop/requests/agent_call_stream_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from ..types.event_type import EventType -from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams - - -class AgentCallStreamResponseParams(typing_extensions.TypedDict): - """ - Response model for calling an Agent in streaming mode. - """ - - log_id: str - message: str - payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams] - type: EventType - created_at: dt.datetime diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py deleted file mode 100644 index 876525c3..00000000 --- a/src/humanloop/requests/agent_call_stream_response_payload.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .log_response import LogResponseParams -from .log_stream_response import LogStreamResponseParams -from .tool_call import ToolCallParams - -AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams] diff --git a/src/humanloop/requests/agent_config_response.py b/src/humanloop/requests/agent_config_response.py deleted file mode 100644 index c2bd9e46..00000000 --- a/src/humanloop/requests/agent_config_response.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class AgentConfigResponseParams(typing_extensions.TypedDict): - pass diff --git a/src/humanloop/requests/agent_continue_call_response.py b/src/humanloop/requests/agent_continue_call_response.py deleted file mode 100644 index d30b4f39..00000000 --- a/src/humanloop/requests/agent_continue_call_response.py +++ /dev/null @@ -1,202 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams -from .agent_response import AgentResponseParams -from .chat_message import ChatMessageParams -from .evaluator_log_response import EvaluatorLogResponseParams -from .log_response import LogResponseParams - - -class AgentContinueCallResponseParams(typing_extensions.TypedDict): - """ - Response model for continuing an Agent call. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the provider. - """ - - prompt_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing_extensions.NotRequired[int] - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the prompt.
- """ - - output_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing_extensions.NotRequired[str] - """ - Reason the generation finished. - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing_extensions.NotRequired[AgentContinueCallResponseToolChoiceParams] - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - agent: AgentResponseParams - """ - Agent that generated the Log. - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. 
- """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] - """ - Logs nested under this Log in the Trace. - """ - - previous_agent_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. - """ diff --git a/src/humanloop/requests/agent_continue_call_response_tool_choice.py b/src/humanloop/requests/agent_continue_call_response_tool_choice.py deleted file mode 100644 index 2111fd9a..00000000 --- a/src/humanloop/requests/agent_continue_call_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoiceParams - -AgentContinueCallResponseToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/requests/agent_continue_call_stream_response.py b/src/humanloop/requests/agent_continue_call_stream_response.py deleted file mode 100644 index bf725bb5..00000000 --- a/src/humanloop/requests/agent_continue_call_stream_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from ..types.event_type import EventType -from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams - - -class AgentContinueCallStreamResponseParams(typing_extensions.TypedDict): - """ - Response model for continuing an Agent call in streaming mode. - """ - - log_id: str - message: str - payload: typing_extensions.NotRequired[AgentContinueCallStreamResponsePayloadParams] - type: EventType - created_at: dt.datetime diff --git a/src/humanloop/requests/agent_continue_call_stream_response_payload.py b/src/humanloop/requests/agent_continue_call_stream_response_payload.py deleted file mode 100644 index e176905a..00000000 --- a/src/humanloop/requests/agent_continue_call_stream_response_payload.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .log_response import LogResponseParams -from .log_stream_response import LogStreamResponseParams -from .tool_call import ToolCallParams - -AgentContinueCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams] diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py deleted file mode 100644 index 4d86d77e..00000000 --- a/src/humanloop/requests/agent_inline_tool.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.on_agent_call_enum import OnAgentCallEnum -from .tool_function import ToolFunctionParams - - -class AgentInlineToolParams(typing_extensions.TypedDict): - type: typing.Literal["inline"] - json_schema: ToolFunctionParams - on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py deleted file mode 100644 index 8bc43e3d..00000000 --- a/src/humanloop/requests/agent_kernel_request.py +++ /dev/null @@ -1,112 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.template_language import TemplateLanguage -from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams -from .agent_kernel_request_stop import AgentKernelRequestStopParams -from .agent_kernel_request_template import AgentKernelRequestTemplateParams -from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams -from .response_format import ResponseFormatParams - - -class AgentKernelRequestParams(typing_extensions.TypedDict): - """ - Base class used by both PromptKernelRequest and AgentKernelRequest. - - Contains the consistent Prompt-related fields. - """ - - model: str - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing_extensions.NotRequired[ModelEndpoints] - """ - The provider model endpoint used. - """ - - template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams] - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing_extensions.NotRequired[TemplateLanguage] - """ - The template language to use for rendering the template. - """ - - provider: typing_extensions.NotRequired[ModelProviders] - """ - The company providing the underlying model service. - """ - - max_tokens: typing_extensions.NotRequired[int] - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - """ - - temperature: typing_extensions.NotRequired[float] - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
- """ - - top_p: typing_extensions.NotRequired[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing_extensions.NotRequired[AgentKernelRequestStopParams] - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing_extensions.NotRequired[int] - """ - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing_extensions.NotRequired[ResponseFormatParams] - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams] - """ - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]] - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ - - max_iterations: typing_extensions.NotRequired[int] - """ - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - """ diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py deleted file mode 100644 index ef446d7b..00000000 --- a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort - -AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py deleted file mode 100644 index eae95d35..00000000 --- a/src/humanloop/requests/agent_kernel_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py deleted file mode 100644 index 875dc18b..00000000 --- a/src/humanloop/requests/agent_kernel_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
- -import typing - -from .chat_message import ChatMessageParams - -AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py deleted file mode 100644 index 5ee508f8..00000000 --- a/src/humanloop/requests/agent_kernel_request_tools_item.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_inline_tool import AgentInlineToolParams -from .agent_linked_file_request import AgentLinkedFileRequestParams - -AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py deleted file mode 100644 index e8950811..00000000 --- a/src/humanloop/requests/agent_linked_file_request.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.on_agent_call_enum import OnAgentCallEnum -from .linked_file_request import LinkedFileRequestParams - - -class AgentLinkedFileRequestParams(typing_extensions.TypedDict): - type: typing.Literal["file"] - link: LinkedFileRequestParams - on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py deleted file mode 100644 index 1bcc8128..00000000 --- a/src/humanloop/requests/agent_linked_file_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import typing_extensions -from ..types.on_agent_call_enum import OnAgentCallEnum -from .linked_file_request import LinkedFileRequestParams - -if typing.TYPE_CHECKING: - from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams - - -class AgentLinkedFileResponseParams(typing_extensions.TypedDict): - type: typing.Literal["file"] - link: LinkedFileRequestParams - on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] - file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"] diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py deleted file mode 100644 index 25c71dbe..00000000 --- a/src/humanloop/requests/agent_linked_file_response_file.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
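`AgentKernelRequestToolsItemParams` above is a union of a linked file and an inline tool, discriminated by the literal `type` field. A sketch of the two shapes; the fields inside `link` come from `LinkedFileRequestParams`, which this patch does not show, so they are assumptions:

    import typing

    inline_tool = {
        "type": "inline",
        "json_schema": {"name": "get_weather", "parameters": {"type": "object"}},
    }
    linked_tool = {
        "type": "file",
        "link": {"file_id": "tl_123"},  # LinkedFileRequest fields assumed
    }

    def describe_tool(tool: typing.Dict[str, typing.Any]) -> str:
        if tool["type"] == "inline":
            return f"inline tool {tool['json_schema']['name']}"
        return f"linked file {tool['link']}"

    for t in (inline_tool, linked_tool):
        print(describe_tool(t))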
- -from __future__ import annotations - -import typing - -from .dataset_response import DatasetResponseParams - -if typing.TYPE_CHECKING: - from .agent_response import AgentResponseParams - from .evaluator_response import EvaluatorResponseParams - from .flow_response import FlowResponseParams - from .prompt_response import PromptResponseParams - from .tool_response import ToolResponseParams -AgentLinkedFileResponseFileParams = typing.Union[ - "PromptResponseParams", - "ToolResponseParams", - DatasetResponseParams, - "EvaluatorResponseParams", - "FlowResponseParams", - "AgentResponseParams", -] diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py deleted file mode 100644 index 940f348f..00000000 --- a/src/humanloop/requests/agent_log_response.py +++ /dev/null @@ -1,201 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams -from .agent_response import AgentResponseParams -from .chat_message import ChatMessageParams - -if typing.TYPE_CHECKING: - from .evaluator_log_response import EvaluatorLogResponseParams - from .log_response import LogResponseParams - - -class AgentLogResponseParams(typing_extensions.TypedDict): - """ - General request for creating a Log - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the provider. - """ - - prompt_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing_extensions.NotRequired[int] - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing_extensions.NotRequired[str] - """ - Reason the generation finished. - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - The messages passed to the provider chat endpoint. - """ - - tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams] - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function. - """ - - agent: AgentResponseParams - """ - Agent that generated the Log. - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs.
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received from the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] - """ - Logs nested under this Log in the Trace.
- """ diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py deleted file mode 100644 index 30ebcb72..00000000 --- a/src/humanloop/requests/agent_log_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoiceParams - -AgentLogResponseToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py deleted file mode 100644 index cd35485e..00000000 --- a/src/humanloop/requests/agent_log_stream_response.py +++ /dev/null @@ -1,87 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from .chat_message import ChatMessageParams - - -class AgentLogStreamResponseParams(typing_extensions.TypedDict): - """ - Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the provider. - """ - - prompt_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing_extensions.NotRequired[int] - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing_extensions.NotRequired[str] - """ - Reason the generation finished. - """ - - id: str - """ - ID of the log. - """ - - agent_id: str - """ - ID of the Agent the log belongs to. - """ - - version_id: str - """ - ID of the specific version of the Agent. - """ diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py deleted file mode 100644 index 10f47b80..00000000 --- a/src/humanloop/requests/agent_response.py +++ /dev/null @@ -1,242 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.template_language import TemplateLanguage -from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus -from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams -from .agent_response_stop import AgentResponseStopParams -from .agent_response_template import AgentResponseTemplateParams -from .environment_response import EnvironmentResponseParams -from .evaluator_aggregate import EvaluatorAggregateParams -from .input_response import InputResponseParams -from .response_format import ResponseFormatParams - -if typing.TYPE_CHECKING: - from .agent_response_tools_item import AgentResponseToolsItemParams - from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams - - -class AgentResponseParams(typing_extensions.TypedDict): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str - """ - Path of the Agent, including the name, which is used as a unique identifier. - """ - - id: str - """ - Unique identifier for the Agent. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - model: str - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing_extensions.NotRequired[ModelEndpoints] - """ - The provider model endpoint used. - """ - - template: typing_extensions.NotRequired[AgentResponseTemplateParams] - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing_extensions.NotRequired[TemplateLanguage] - """ - The template language to use for rendering the template. - """ - - provider: typing_extensions.NotRequired[ModelProviders] - """ - The company providing the underlying model service. - """ - - max_tokens: typing_extensions.NotRequired[int] - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - """ - - temperature: typing_extensions.NotRequired[float] - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - """ - - top_p: typing_extensions.NotRequired[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing_extensions.NotRequired[AgentResponseStopParams] - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0.
Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing_extensions.NotRequired[int] - """ - If specified, model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing_extensions.NotRequired[ResponseFormatParams] - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams] - """ - Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing.Sequence["AgentResponseToolsItemParams"] - """ - List of tools that the Agent can call. These can be linked files or inline tools. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. - """ - - max_iterations: typing_extensions.NotRequired[int] - """ - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - """ - - version_name: typing_extensions.NotRequired[str] - """ - Unique name for the Agent version. Version names must be unique for a given Agent. - """ - - version_description: typing_extensions.NotRequired[str] - """ - Description of the version, e.g., the changes made in this version. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the Agent. - """ - - tags: typing_extensions.NotRequired[typing.Sequence[str]] - """ - List of tags associated with the file. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the file. - """ - - name: str - """ - Name of the Agent. - """ - - schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The JSON schema for the Agent. - """ - - version_id: str - """ - Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. - """ - - type: typing_extensions.NotRequired[typing.Literal["agent"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Agent Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Agent. - """ - - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Agent Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Agent Version was committed. - """ - - status: VersionStatus - """ - The status of the Agent Version.
- """ - - last_used_at: dt.datetime - version_logs_count: int - """ - The number of logs that have been generated for this Agent Version - """ - - total_logs_count: int - """ - The number of logs that have been generated across all Agent Versions - """ - - inputs: typing.Sequence[InputResponseParams] - """ - Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. - """ - - evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] - """ - Evaluators that have been attached to this Agent that are used for monitoring logs. - """ - - evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] - """ - Aggregation of Evaluator results for the Agent Version. - """ - - raw_file_content: typing_extensions.NotRequired[str] - """ - The raw content of the Agent. Corresponds to the .agent file. - """ diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py deleted file mode 100644 index a32f2ecf..00000000 --- a/src/humanloop/requests/agent_response_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort - -AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py deleted file mode 100644 index a395ee73..00000000 --- a/src/humanloop/requests/agent_response_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py deleted file mode 100644 index 3998be1b..00000000 --- a/src/humanloop/requests/agent_response_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessageParams - -AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py deleted file mode 100644 index 87e1e036..00000000 --- a/src/humanloop/requests/agent_response_tools_item.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -from .agent_inline_tool import AgentInlineToolParams - -if typing.TYPE_CHECKING: - from .agent_linked_file_response import AgentLinkedFileResponseParams -AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams] diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py deleted file mode 100644 index b71f614e..00000000 --- a/src/humanloop/requests/anthropic_redacted_thinking_content.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict): - type: typing.Literal["redacted_thinking"] - data: str - """ - Thinking block Anthropic redacted for safety reasons. 
User is expected to pass the block back to Anthropic - """ diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py deleted file mode 100644 index 23fdffb6..00000000 --- a/src/humanloop/requests/anthropic_thinking_content.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class AnthropicThinkingContentParams(typing_extensions.TypedDict): - type: typing.Literal["thinking"] - thinking: str - """ - Model's chain-of-thought for providing the response. - """ - - signature: str - """ - Cryptographic signature that verifies the thinking block was generated by Anthropic. - """ diff --git a/src/humanloop/requests/boolean_evaluator_stats_response.py b/src/humanloop/requests/boolean_evaluator_stats_response.py deleted file mode 100644 index 18618f40..00000000 --- a/src/humanloop/requests/boolean_evaluator_stats_response.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class BooleanEvaluatorStatsResponseParams(typing_extensions.TypedDict): - """ - Base attributes for stats for an Evaluator Version-Evaluated Version pair - in the Evaluation. - """ - - evaluator_version_id: str - """ - Unique identifier for the Evaluator Version. - """ - - total_logs: int - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. - """ - - num_nulls: int - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. - """ - - num_errors: int - """ - The total number of errored Evaluators for this Evaluator Version. - """ - - num_true: int - """ - The total number of `True` judgments for this Evaluator Version. - """ - - num_false: int - """ - The total number of `False` judgments for this Evaluator Version. - """ diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py deleted file mode 100644 index eeb6c7cd..00000000 --- a/src/humanloop/requests/chat_message.py +++ /dev/null @@ -1,41 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.chat_role import ChatRole -from .chat_message_content import ChatMessageContentParams -from .chat_message_thinking_item import ChatMessageThinkingItemParams -from .tool_call import ToolCallParams - - -class ChatMessageParams(typing_extensions.TypedDict): - content: typing_extensions.NotRequired[ChatMessageContentParams] - """ - The content of the message. - """ - - name: typing_extensions.NotRequired[str] - """ - Optional name of the message author. - """ - - tool_call_id: typing_extensions.NotRequired[str] - """ - Tool call that this message is responding to. - """ - - role: ChatRole - """ - Role of the message author. - """ - - tool_calls: typing_extensions.NotRequired[typing.Sequence[ToolCallParams]] - """ - A list of tool calls requested by the assistant. - """ - - thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]] - """ - Model's chain-of-thought for providing the response. Present on assistant messages if model supports it. 
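Reading the BooleanEvaluatorStatsResponseParams docstrings together: total_logs includes nulls and errors, num_judgments excludes them, and boolean judgments split into num_true and num_false. A hypothetical, self-consistent example; the accounting identities below are an assumption drawn from those docstrings:

stats = {
    "evaluator_version_id": "evv_123",  # hypothetical ID
    "total_logs": 100,
    "num_judgments": 90,
    "num_nulls": 6,
    "num_errors": 4,
    "num_true": 72,
    "num_false": 18,
}
# 90 judgments + 6 nulls + 4 errors == 100 total Logs
assert stats["num_judgments"] + stats["num_nulls"] + stats["num_errors"] == stats["total_logs"]
# 72 True + 18 False == 90 judgments
assert stats["num_true"] + stats["num_false"] == stats["num_judgments"]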
- """ diff --git a/src/humanloop/requests/chat_message_content.py b/src/humanloop/requests/chat_message_content.py deleted file mode 100644 index ea04974e..00000000 --- a/src/humanloop/requests/chat_message_content.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message_content_item import ChatMessageContentItemParams - -ChatMessageContentParams = typing.Union[str, typing.Sequence[ChatMessageContentItemParams]] diff --git a/src/humanloop/requests/chat_message_content_item.py b/src/humanloop/requests/chat_message_content_item.py deleted file mode 100644 index c4a24ea7..00000000 --- a/src/humanloop/requests/chat_message_content_item.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .image_chat_content import ImageChatContentParams -from .text_chat_content import TextChatContentParams - -ChatMessageContentItemParams = typing.Union[TextChatContentParams, ImageChatContentParams] diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py deleted file mode 100644 index 0c54d371..00000000 --- a/src/humanloop/requests/chat_message_thinking_item.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams -from .anthropic_thinking_content import AnthropicThinkingContentParams - -ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams] diff --git a/src/humanloop/requests/code_evaluator_request.py b/src/humanloop/requests/code_evaluator_request.py deleted file mode 100644 index 914d8f46..00000000 --- a/src/humanloop/requests/code_evaluator_request.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.evaluator_arguments_type import EvaluatorArgumentsType -from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum -from ..types.valence import Valence -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams - - -class CodeEvaluatorRequestParams(typing_extensions.TypedDict): - arguments_type: EvaluatorArgumentsType - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: EvaluatorReturnTypeEnum - """ - The type of the return value of the Evaluator. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] - """ - The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. - """ - - number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing_extensions.NotRequired[Valence] - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. 
If 'negative', a lower number is better. - """ - - evaluator_type: typing.Literal["python"] - code: str - """ - The code for the Evaluator. This code will be executed in a sandboxed environment. - """ diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py deleted file mode 100644 index f68f2e96..00000000 --- a/src/humanloop/requests/create_agent_log_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.log_status import LogStatus - - -class CreateAgentLogResponseParams(typing_extensions.TypedDict): - """ - Response for an Agent Log. - """ - - id: str - """ - Unique identifier for the Log. - """ - - agent_id: str - """ - Unique identifier for the Agent. - """ - - version_id: str - """ - Unique identifier for the Agent Version. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. - """ diff --git a/src/humanloop/requests/create_datapoint_request.py b/src/humanloop/requests/create_datapoint_request.py deleted file mode 100644 index 10ada080..00000000 --- a/src/humanloop/requests/create_datapoint_request.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .chat_message import ChatMessageParams -from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams - - -class CreateDatapointRequestParams(typing_extensions.TypedDict): - inputs: typing_extensions.NotRequired[typing.Dict[str, str]] - """ - The inputs to the prompt template. - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - List of chat messages to provide to the model. - """ - - target: typing_extensions.NotRequired[typing.Dict[str, CreateDatapointRequestTargetValueParams]] - """ - Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation. - """ diff --git a/src/humanloop/requests/create_datapoint_request_target_value.py b/src/humanloop/requests/create_datapoint_request_target_value.py deleted file mode 100644 index ff6ed57f..00000000 --- a/src/humanloop/requests/create_datapoint_request_target_value.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateDatapointRequestTargetValueParams = typing.Union[ - str, int, float, bool, typing.Sequence[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]] -] diff --git a/src/humanloop/requests/create_evaluator_log_response.py b/src/humanloop/requests/create_evaluator_log_response.py deleted file mode 100644 index 29fbcdc5..00000000 --- a/src/humanloop/requests/create_evaluator_log_response.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class CreateEvaluatorLogResponseParams(typing_extensions.TypedDict): - id: str - """ - String identifier of the new Log. - """ - - parent_id: str - """ - Identifier of the evaluated parent Log. - """ - - session_id: typing_extensions.NotRequired[str] - """ - Identifier of the Session containing both the parent and the new child Log.
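Putting CodeEvaluatorRequestParams together, a sketch of a Python code Evaluator spec; `evaluator_type` is the fixed literal "python" and "boolean" is one of the return types named in the docstrings, while the "target_free" value and the evaluate() entrypoint inside `code` are assumptions:

code_evaluator = {
    "arguments_type": "target_free",  # assumed EvaluatorArgumentsType value
    "return_type": "boolean",
    "evaluator_type": "python",
    "code": "def evaluate(log):\n    return bool(log.get('output'))",  # assumed entrypoint signature
}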
If the parent Log does not belong to a Session, a new Session is created with this ID. - """ - - version_id: str - """ - Identifier of Evaluator Version for which the Log was registered. - """ diff --git a/src/humanloop/requests/create_flow_log_response.py b/src/humanloop/requests/create_flow_log_response.py deleted file mode 100644 index 6f490ba3..00000000 --- a/src/humanloop/requests/create_flow_log_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.log_status import LogStatus - - -class CreateFlowLogResponseParams(typing_extensions.TypedDict): - """ - Response for a Flow Log. - """ - - id: str - """ - Unique identifier for the Log. - """ - - flow_id: str - """ - Unique identifier for the Flow. - """ - - version_id: str - """ - Unique identifier for the Flow Version. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. - """ diff --git a/src/humanloop/requests/create_prompt_log_response.py b/src/humanloop/requests/create_prompt_log_response.py deleted file mode 100644 index 8a0b39d3..00000000 --- a/src/humanloop/requests/create_prompt_log_response.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class CreatePromptLogResponseParams(typing_extensions.TypedDict): - id: str - """ - String ID of log. - """ - - prompt_id: str - """ - ID of the Prompt the log belongs to. - """ - - version_id: str - """ - ID of the specific version of the Prompt. - """ - - session_id: typing_extensions.NotRequired[str] - """ - String ID of session the log belongs to. - """ diff --git a/src/humanloop/requests/create_tool_log_response.py b/src/humanloop/requests/create_tool_log_response.py deleted file mode 100644 index 9b898fba..00000000 --- a/src/humanloop/requests/create_tool_log_response.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class CreateToolLogResponseParams(typing_extensions.TypedDict): - id: str - """ - String ID of log. - """ - - tool_id: str - """ - ID of the Tool the log belongs to. - """ - - version_id: str - """ - ID of the specific version of the Tool. - """ - - session_id: typing_extensions.NotRequired[str] - """ - String ID of session the log belongs to. - """ diff --git a/src/humanloop/requests/dashboard_configuration.py b/src/humanloop/requests/dashboard_configuration.py deleted file mode 100644 index b123ac78..00000000 --- a/src/humanloop/requests/dashboard_configuration.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.time_unit import TimeUnit - - -class DashboardConfigurationParams(typing_extensions.TypedDict): - time_unit: TimeUnit - time_range_days: int - model_config_ids: typing.Sequence[str] diff --git a/src/humanloop/requests/datapoint_response.py b/src/humanloop/requests/datapoint_response.py deleted file mode 100644 index ba1928e9..00000000 --- a/src/humanloop/requests/datapoint_response.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
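DashboardConfigurationParams has exactly three required keys. A sketch; the "day" value is an assumption, since TimeUnit's members are not shown in this diff:

dashboard_config = {
    "time_unit": "day",                            # assumed TimeUnit member
    "time_range_days": 30,
    "model_config_ids": ["config_a", "config_b"],  # hypothetical IDs
}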
- -import typing - -import typing_extensions -from .chat_message import ChatMessageParams -from .datapoint_response_target_value import DatapointResponseTargetValueParams - - -class DatapointResponseParams(typing_extensions.TypedDict): - inputs: typing_extensions.NotRequired[typing.Dict[str, str]] - """ - The inputs to the prompt template. - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - List of chat messages to provide to the model. - """ - - target: typing_extensions.NotRequired[typing.Dict[str, DatapointResponseTargetValueParams]] - """ - Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation. - """ - - id: str - """ - Unique identifier for the Datapoint. Starts with `dp_`. - """ diff --git a/src/humanloop/requests/datapoint_response_target_value.py b/src/humanloop/requests/datapoint_response_target_value.py deleted file mode 100644 index 43cbdaa7..00000000 --- a/src/humanloop/requests/datapoint_response_target_value.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DatapointResponseTargetValueParams = typing.Union[ - str, int, float, bool, typing.Sequence[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]] -] diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py deleted file mode 100644 index aa0119e9..00000000 --- a/src/humanloop/requests/dataset_response.py +++ /dev/null @@ -1,102 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from ..types.user_response import UserResponse -from .datapoint_response import DatapointResponseParams -from .environment_response import EnvironmentResponseParams - - -class DatasetResponseParams(typing_extensions.TypedDict): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str - """ - Path of the Dataset, including the name, which is used as a unique identifier. - """ - - id: str - """ - Unique identifier for the Dataset. Starts with `ds_`. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - name: str - """ - Name of the Dataset, which is used as a unique identifier. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the Dataset. - """ - - schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The JSON schema for the File. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the file. - """ - - tags: typing_extensions.NotRequired[typing.Sequence[str]] - """ - List of tags associated with the file. - """ - - version_id: str - """ - Unique identifier for the specific Dataset Version. If no query params provided, the default deployed Dataset Version is returned. Starts with `dsv_`. - """ - - type: typing_extensions.NotRequired[typing.Literal["dataset"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Dataset Version is deployed to. 
- """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Dataset. - """ - - last_used_at: dt.datetime - version_name: typing_extensions.NotRequired[str] - """ - Unique name for the Dataset version. Version names must be unique for a given Dataset. - """ - - version_description: typing_extensions.NotRequired[str] - """ - Description of the version, e.g., the changes made in this version. - """ - - datapoints_count: int - """ - The number of Datapoints in this Dataset version. - """ - - datapoints: typing_extensions.NotRequired[typing.Sequence[DatapointResponseParams]] - """ - The list of Datapoints in this Dataset version. Only provided if explicitly requested. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - """ diff --git a/src/humanloop/requests/directory_response.py b/src/humanloop/requests/directory_response.py deleted file mode 100644 index 4dc4a7d5..00000000 --- a/src/humanloop/requests/directory_response.py +++ /dev/null @@ -1,46 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions - - -class DirectoryResponseParams(typing_extensions.TypedDict): - id: str - """ - String ID of directory. Starts with `dir_`. - """ - - parent_id: typing_extensions.NotRequired[str] - """ - ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`. - """ - - name: str - """ - Name of the directory. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the directory. - """ - - path: str - """ - Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the directory. - """ - - tags: typing.Sequence[str] - """ - List of tags associated with the directory. - """ - - created_at: dt.datetime - updated_at: dt.datetime diff --git a/src/humanloop/requests/directory_with_parents_and_children_response.py b/src/humanloop/requests/directory_with_parents_and_children_response.py deleted file mode 100644 index 27af28b6..00000000 --- a/src/humanloop/requests/directory_with_parents_and_children_response.py +++ /dev/null @@ -1,64 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from .directory_response import DirectoryResponseParams -from .directory_with_parents_and_children_response_files_item import ( - DirectoryWithParentsAndChildrenResponseFilesItemParams, -) - - -class DirectoryWithParentsAndChildrenResponseParams(typing_extensions.TypedDict): - id: str - """ - String ID of directory. Starts with `dir_`. - """ - - parent_id: typing_extensions.NotRequired[str] - """ - ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`. - """ - - name: str - """ - Name of the directory. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the directory. - """ - - path: str - """ - Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the directory. 
- """ - - tags: typing.Sequence[str] - """ - List of tags associated with the directory. - """ - - created_at: dt.datetime - updated_at: dt.datetime - subdirectories: typing.Sequence[DirectoryResponseParams] - """ - List of subdirectories in the directory. - """ - - files: typing.Sequence[DirectoryWithParentsAndChildrenResponseFilesItemParams] - """ - List of files in the directory. - """ - - parents: typing.Sequence[DirectoryResponseParams] - """ - List of parent directories of the directory. - """ diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py deleted file mode 100644 index 1ebe44fc..00000000 --- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_response import AgentResponseParams -from .dataset_response import DatasetResponseParams -from .evaluator_response import EvaluatorResponseParams -from .flow_response import FlowResponseParams -from .prompt_response import PromptResponseParams -from .tool_response import ToolResponseParams - -DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[ - PromptResponseParams, - ToolResponseParams, - EvaluatorResponseParams, - DatasetResponseParams, - FlowResponseParams, - AgentResponseParams, -] diff --git a/src/humanloop/requests/environment_response.py b/src/humanloop/requests/environment_response.py deleted file mode 100644 index 0c74481e..00000000 --- a/src/humanloop/requests/environment_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from ..types.environment_tag import EnvironmentTag - - -class EnvironmentResponseParams(typing_extensions.TypedDict): - id: str - created_at: dt.datetime - name: str - tag: EnvironmentTag diff --git a/src/humanloop/requests/evaluatee_request.py b/src/humanloop/requests/evaluatee_request.py deleted file mode 100644 index d7544be1..00000000 --- a/src/humanloop/requests/evaluatee_request.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class EvaluateeRequestParams(typing_extensions.TypedDict): - """ - Specification of a File version on Humanloop. - - This can be done in a couple of ways: - - Specifying `version_id` directly. - - Specifying a File (and optionally an Environment). - - A File can be specified by either `path` or `file_id`. - - An Environment can be specified by `environment_id`. If no Environment is specified, the default Environment is used. - """ - - version_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the File Version. If provided, none of the other fields should be specified. - """ - - path: typing_extensions.NotRequired[str] - """ - Path identifying a File. Provide either this or `file_id` if you want to specify a File. - """ - - file_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the File. Provide either this or `path` if you want to specify a File. - """ - - environment: typing_extensions.NotRequired[str] - """ - Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used. 
- """ - - batch_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the batch of Logs to include in the Evaluation. - """ - - orchestrated: typing_extensions.NotRequired[bool] - """ - Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - """ diff --git a/src/humanloop/requests/evaluatee_response.py b/src/humanloop/requests/evaluatee_response.py deleted file mode 100644 index fb860a37..00000000 --- a/src/humanloop/requests/evaluatee_response.py +++ /dev/null @@ -1,33 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from .run_version_response import RunVersionResponseParams - - -class EvaluateeResponseParams(typing_extensions.TypedDict): - """ - Version of the Evaluatee being evaluated. - """ - - version: typing_extensions.NotRequired[RunVersionResponseParams] - batch_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the batch of Logs to include in the Evaluation. - """ - - orchestrated: bool - """ - Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - """ - - pinned: bool - """ - Pinned Evaluatees are shown in Humanloop's Overview, allowing you to use them as baselines for comparison. - """ - - added_at: typing_extensions.NotRequired[dt.datetime] - """ - When the Evaluatee was added to the Evaluation. - """ diff --git a/src/humanloop/requests/evaluation_evaluator_response.py b/src/humanloop/requests/evaluation_evaluator_response.py deleted file mode 100644 index 3d40ba33..00000000 --- a/src/humanloop/requests/evaluation_evaluator_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from .evaluator_response import EvaluatorResponseParams - - -class EvaluationEvaluatorResponseParams(typing_extensions.TypedDict): - version: EvaluatorResponseParams - orchestrated: bool - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. - """ - - added_at: dt.datetime - """ - When the Evaluator was added to the Evaluation. - """ diff --git a/src/humanloop/requests/evaluation_log_response.py b/src/humanloop/requests/evaluation_log_response.py deleted file mode 100644 index 5bbd0649..00000000 --- a/src/humanloop/requests/evaluation_log_response.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .datapoint_response import DatapointResponseParams -from .log_response import LogResponseParams - - -class EvaluationLogResponseParams(typing_extensions.TypedDict): - run_id: str - """ - Unique identifier for the Run. - """ - - datapoint: typing_extensions.NotRequired[DatapointResponseParams] - """ - The Datapoint used to generate the Log - """ - - log: LogResponseParams - """ - The Log that was evaluated by the Evaluator. - """ - - evaluator_logs: typing.Sequence[LogResponseParams] - """ - The Evaluator Logs containing the judgments for the Log. 
- """ diff --git a/src/humanloop/requests/evaluation_response.py b/src/humanloop/requests/evaluation_response.py deleted file mode 100644 index 4c077927..00000000 --- a/src/humanloop/requests/evaluation_response.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from ..types.user_response import UserResponse -from .evaluation_evaluator_response import EvaluationEvaluatorResponseParams - - -class EvaluationResponseParams(typing_extensions.TypedDict): - id: str - """ - Unique identifier for the Evaluation. Starts with `evr`. - """ - - runs_count: int - """ - The total number of Runs in the Evaluation. - """ - - evaluators: typing.Sequence[EvaluationEvaluatorResponseParams] - """ - The Evaluator Versions used to evaluate. - """ - - name: typing_extensions.NotRequired[str] - """ - Name of the Evaluation to help identify it. Must be unique among Evaluations associated with File. - """ - - file_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the File associated with the Evaluation. - """ - - created_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - updated_at: dt.datetime - url: typing_extensions.NotRequired[str] - """ - URL to view the Evaluation on the Humanloop. - """ diff --git a/src/humanloop/requests/evaluation_run_response.py b/src/humanloop/requests/evaluation_run_response.py deleted file mode 100644 index 5dd7c782..00000000 --- a/src/humanloop/requests/evaluation_run_response.py +++ /dev/null @@ -1,56 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from ..types.evaluation_status import EvaluationStatus -from ..types.user_response import UserResponse -from .dataset_response import DatasetResponseParams -from .run_version_response import RunVersionResponseParams - - -class EvaluationRunResponseParams(typing_extensions.TypedDict): - id: str - """ - Unique identifier for the Run. - """ - - dataset: typing_extensions.NotRequired[DatasetResponseParams] - """ - The Dataset used in the Run. - """ - - version: typing_extensions.NotRequired[RunVersionResponseParams] - """ - The version used in the Run. - """ - - orchestrated: bool - """ - Whether the Run is orchestrated by Humanloop. - """ - - added_at: dt.datetime - """ - When the Run was added to the Evaluation. - """ - - created_at: dt.datetime - """ - When the Run was created. - """ - - created_by: typing_extensions.NotRequired[UserResponse] - """ - The User who created the Run. - """ - - status: EvaluationStatus - """ - The status of the Run. - """ - - control: bool - """ - Stats for other Runs will be displayed in comparison to the control Run. - """ diff --git a/src/humanloop/requests/evaluation_runs_response.py b/src/humanloop/requests/evaluation_runs_response.py deleted file mode 100644 index fd3d4792..00000000 --- a/src/humanloop/requests/evaluation_runs_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .evaluation_run_response import EvaluationRunResponseParams - - -class EvaluationRunsResponseParams(typing_extensions.TypedDict): - runs: typing.Sequence[EvaluationRunResponseParams] - """ - The Runs in the Evaluation. 
- """ diff --git a/src/humanloop/requests/evaluation_stats.py b/src/humanloop/requests/evaluation_stats.py deleted file mode 100644 index edd56e15..00000000 --- a/src/humanloop/requests/evaluation_stats.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.evaluation_status import EvaluationStatus -from .run_stats_response import RunStatsResponseParams - - -class EvaluationStatsParams(typing_extensions.TypedDict): - run_stats: typing.Sequence[RunStatsResponseParams] - """ - Stats for each Run in the Evaluation. - """ - - progress: typing_extensions.NotRequired[str] - """ - A summary string report of the Evaluation's progress you can print to the command line;helpful when integrating Evaluations with CI/CD. - """ - - report: typing_extensions.NotRequired[str] - """ - A summary string report of the Evaluation you can print to command line;helpful when integrating Evaluations with CI/CD. - """ - - status: EvaluationStatus - """ - The current status of the Evaluation. - """ diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request.py b/src/humanloop/requests/evaluator_activation_deactivation_request.py deleted file mode 100644 index b3f3f91d..00000000 --- a/src/humanloop/requests/evaluator_activation_deactivation_request.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from .evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) - - -class EvaluatorActivationDeactivationRequestParams(typing_extensions.TypedDict): - activate: typing_extensions.NotRequired[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - """ - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - """ - - deactivate: typing_extensions.NotRequired[ - typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] - ] - """ - Evaluators to deactivate. These will not be run on new Logs. - """ diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py b/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py deleted file mode 100644 index 049c4cc8..00000000 --- a/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams -from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams - -EvaluatorActivationDeactivationRequestActivateItemParams = typing.Union[ - MonitoringEvaluatorVersionRequestParams, MonitoringEvaluatorEnvironmentRequestParams -] diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py b/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py deleted file mode 100644 index 4a21dcaf..00000000 --- a/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams -from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams - -EvaluatorActivationDeactivationRequestDeactivateItemParams = typing.Union[ - MonitoringEvaluatorVersionRequestParams, MonitoringEvaluatorEnvironmentRequestParams -] diff --git a/src/humanloop/requests/evaluator_aggregate.py b/src/humanloop/requests/evaluator_aggregate.py deleted file mode 100644 index f8840d4f..00000000 --- a/src/humanloop/requests/evaluator_aggregate.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions - - -class EvaluatorAggregateParams(typing_extensions.TypedDict): - value: float - """ - The aggregated value of the evaluator. - """ - - evaluator_id: str - """ - ID of the evaluator. - """ - - evaluator_version_id: str - """ - ID of the evaluator version. - """ - - created_at: dt.datetime - updated_at: dt.datetime diff --git a/src/humanloop/requests/evaluator_config_response.py b/src/humanloop/requests/evaluator_config_response.py deleted file mode 100644 index de75afcf..00000000 --- a/src/humanloop/requests/evaluator_config_response.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class EvaluatorConfigResponseParams(typing_extensions.TypedDict): - pass diff --git a/src/humanloop/requests/evaluator_file_id.py b/src/humanloop/requests/evaluator_file_id.py deleted file mode 100644 index 952eda84..00000000 --- a/src/humanloop/requests/evaluator_file_id.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class EvaluatorFileIdParams(typing_extensions.TypedDict): - """ - Base model for specifying an Evaluator for an Evaluation. - """ - - environment: typing_extensions.NotRequired[str] - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - id: str - """ - Unique identifier for the File. - """ - - orchestrated: typing_extensions.NotRequired[bool] - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. - """ diff --git a/src/humanloop/requests/evaluator_file_path.py b/src/humanloop/requests/evaluator_file_path.py deleted file mode 100644 index 0b1a06c9..00000000 --- a/src/humanloop/requests/evaluator_file_path.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class EvaluatorFilePathParams(typing_extensions.TypedDict): - """ - Base model for specifying an Evaluator for an Evaluation. - """ - - environment: typing_extensions.NotRequired[str] - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - path: str - """ - Path identifying a File. Provide this to specify a File. - """ - - orchestrated: typing_extensions.NotRequired[bool] - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. 
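EvaluatorFileIdParams and EvaluatorFilePathParams are the two ways of pointing an Evaluation at an Evaluator File. Sketches with hypothetical identifiers:

evaluator_by_id = {
    "id": "ev_123",            # hypothetical File ID
    "orchestrated": True,      # default: Humanloop runs the Evaluator
}
evaluator_by_path = {
    "path": "evals/accuracy",  # hypothetical File path
    "environment": "staging",  # optional; default Environment if omitted
}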
- """ diff --git a/src/humanloop/requests/evaluator_judgment_number_limit.py b/src/humanloop/requests/evaluator_judgment_number_limit.py deleted file mode 100644 index 3cdd87db..00000000 --- a/src/humanloop/requests/evaluator_judgment_number_limit.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class EvaluatorJudgmentNumberLimitParams(typing_extensions.TypedDict): - min: typing_extensions.NotRequired[float] - """ - The minimum value that can be selected. - """ - - max: typing_extensions.NotRequired[float] - """ - The maximum value that can be selected. - """ - - step: typing_extensions.NotRequired[float] - """ - The step size for the number input. - """ diff --git a/src/humanloop/requests/evaluator_judgment_option_response.py b/src/humanloop/requests/evaluator_judgment_option_response.py deleted file mode 100644 index 77724406..00000000 --- a/src/humanloop/requests/evaluator_judgment_option_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.valence import Valence - - -class EvaluatorJudgmentOptionResponseParams(typing_extensions.TypedDict): - name: str - """ - The name of the option. - """ - - valence: typing_extensions.NotRequired[Valence] - """ - Whether this option should be considered positive or negative. - """ diff --git a/src/humanloop/requests/evaluator_log_response.py b/src/humanloop/requests/evaluator_log_response.py deleted file mode 100644 index c434280e..00000000 --- a/src/humanloop/requests/evaluator_log_response.py +++ /dev/null @@ -1,176 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .chat_message import ChatMessageParams -from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams -from .evaluator_response import EvaluatorResponseParams - -if typing.TYPE_CHECKING: - from .log_response import LogResponseParams - - -class EvaluatorLogResponseParams(typing_extensions.TypedDict): - """ - General request for creating a Log - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. 
- """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - parent_id: typing_extensions.NotRequired[str] - """ - Identifier of the evaluated Log. The newly created Log will have this one set as parent. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the LLM. Only populated for LLM Evaluator Logs. - """ - - judgment: typing_extensions.NotRequired[EvaluatorLogResponseJudgmentParams] - """ - Evaluator assessment of the Log. - """ - - marked_completed: typing_extensions.NotRequired[bool] - """ - Whether the Log has been manually marked as completed by a user. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] - """ - Logs nested under this Log in the Trace. - """ - - evaluator: EvaluatorResponseParams - """ - Evaluator used to generate the judgment. - """ - - parent: typing_extensions.NotRequired["LogResponseParams"] - """ - The Log that was evaluated. 
Only provided if the ?include_parent query parameter is set for the - """ diff --git a/src/humanloop/requests/evaluator_log_response_judgment.py b/src/humanloop/requests/evaluator_log_response_judgment.py deleted file mode 100644 index 8958f7d9..00000000 --- a/src/humanloop/requests/evaluator_log_response_judgment.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -EvaluatorLogResponseJudgmentParams = typing.Union[bool, str, typing.Sequence[str], float] diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py deleted file mode 100644 index 38093ae5..00000000 --- a/src/humanloop/requests/evaluator_response.py +++ /dev/null @@ -1,122 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.user_response import UserResponse -from .environment_response import EnvironmentResponseParams -from .evaluator_aggregate import EvaluatorAggregateParams -from .evaluator_response_spec import EvaluatorResponseSpecParams -from .input_response import InputResponseParams - -if typing.TYPE_CHECKING: - from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams - - -class EvaluatorResponseParams(typing_extensions.TypedDict): - """ - Version of the Evaluator used to provide judgments. - """ - - path: str - """ - Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - """ - - id: str - """ - Unique identifier for the Evaluator. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - version_name: typing_extensions.NotRequired[str] - """ - Unique name for the Evaluator version. Version names must be unique for a given Evaluator. - """ - - version_description: typing_extensions.NotRequired[str] - """ - Description of the version, e.g., the changes made in this version. - """ - - spec: EvaluatorResponseSpecParams - name: str - """ - Name of the Evaluator, which is used as a unique identifier. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the Evaluator. - """ - - schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The JSON schema for the File. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the file. - """ - - tags: typing_extensions.NotRequired[typing.Sequence[str]] - """ - List of tags associated with the file. - """ - - version_id: str - """ - Unique identifier for the specific Evaluator Version. If no query params provided, the default deployed Evaluator Version is returned. - """ - - type: typing_extensions.NotRequired[typing.Literal["evaluator"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Evaluator Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Evaluator. - """ - - last_used_at: dt.datetime - version_logs_count: int - """ - The number of logs that have been generated for this Evaluator Version - """ - - total_logs_count: int - """ - The number of logs that have been generated across all Evaluator Versions - """ - - inputs: typing.Sequence[InputResponseParams] - """ - Inputs associated to the Evaluator. 
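EvaluatorLogResponseJudgmentParams is a plain union, so a judgment takes one of four Python shapes, matching the Evaluator `return_type` values named in the docstrings:

boolean_judgment = True                          # 'boolean'
select_judgment = "Accurate"                     # 'select'
multi_select_judgment = ["Accurate", "Concise"]  # 'multi_select'
number_judgment = 0.87                           # 'number'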
Inputs correspond to any of the variables used within the Evaluator template. - """ - - evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] - """ - Evaluators that have been attached to this Evaluator that are used for monitoring logs. - """ - - evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] - """ - Aggregation of Evaluator results for the Evaluator Version. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ diff --git a/src/humanloop/requests/evaluator_response_spec.py b/src/humanloop/requests/evaluator_response_spec.py deleted file mode 100644 index 72cf3d82..00000000 --- a/src/humanloop/requests/evaluator_response_spec.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .code_evaluator_request import CodeEvaluatorRequestParams -from .external_evaluator_request import ExternalEvaluatorRequestParams -from .human_evaluator_request import HumanEvaluatorRequestParams -from .llm_evaluator_request import LlmEvaluatorRequestParams - -EvaluatorResponseSpecParams = typing.Union[ - LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams -] diff --git a/src/humanloop/requests/evaluator_version_id.py b/src/humanloop/requests/evaluator_version_id.py deleted file mode 100644 index 94700595..00000000 --- a/src/humanloop/requests/evaluator_version_id.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class EvaluatorVersionIdParams(typing_extensions.TypedDict): - """ - Base model for specifying an Evaluator for an Evaluation. - """ - - version_id: str - """ - Unique identifier for the Version. - """ - - orchestrated: typing_extensions.NotRequired[bool] - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. - """ diff --git a/src/humanloop/requests/external_evaluator_request.py b/src/humanloop/requests/external_evaluator_request.py deleted file mode 100644 index 6e77103f..00000000 --- a/src/humanloop/requests/external_evaluator_request.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.evaluator_arguments_type import EvaluatorArgumentsType -from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum -from ..types.valence import Valence -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams - - -class ExternalEvaluatorRequestParams(typing_extensions.TypedDict): - arguments_type: EvaluatorArgumentsType - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: EvaluatorReturnTypeEnum - """ - The type of the return value of the Evaluator. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. 
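Aside, for orientation on the types above: `EvaluatorLogResponseJudgmentParams` is a plain union, and `EvaluatorVersionIdParams` pins an Evaluator for an Evaluation by Version ID. A minimal sketch using ordinary Python values in place of the TypedDicts; the IDs are illustrative, not real:

import typing

# Mirrors EvaluatorLogResponseJudgmentParams = Union[bool, str, Sequence[str], float].
Judgment = typing.Union[bool, str, typing.Sequence[str], float]

binary_judgment: Judgment = True                    # boolean Evaluator
select_judgment: Judgment = "helpful"               # select Evaluator
multi_judgment: Judgment = ["concise", "grounded"]  # multi_select Evaluator
numeric_judgment: Judgment = 0.85                   # number Evaluator

# EvaluatorVersionIdParams with orchestrated=False: Humanloop does not run the
# Evaluator itself; the caller submits judgment Logs through the API instead.
evaluator_spec = {
    "version_id": "evv_0123456789",  # illustrative ID
    "orchestrated": False,
}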
- """ - - options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] - """ - The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. - """ - - number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing_extensions.NotRequired[Valence] - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. - """ - - evaluator_type: typing.Literal["external"] diff --git a/src/humanloop/requests/file_environment_response.py b/src/humanloop/requests/file_environment_response.py deleted file mode 100644 index 40b60bc7..00000000 --- a/src/humanloop/requests/file_environment_response.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from ..types.environment_tag import EnvironmentTag -from .file_environment_response_file import FileEnvironmentResponseFileParams - - -class FileEnvironmentResponseParams(typing_extensions.TypedDict): - """ - Response model for the List Environments endpoint under Files. - - Contains the deployed version of the File, if one is deployed to the Environment. - """ - - id: str - created_at: dt.datetime - name: str - tag: EnvironmentTag - file: typing_extensions.NotRequired[FileEnvironmentResponseFileParams] - """ - The version of the File that is deployed to the Environment, if one is deployed. - """ diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py deleted file mode 100644 index 1a2021cb..00000000 --- a/src/humanloop/requests/file_environment_response_file.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_response import AgentResponseParams -from .dataset_response import DatasetResponseParams -from .evaluator_response import EvaluatorResponseParams -from .flow_response import FlowResponseParams -from .prompt_response import PromptResponseParams -from .tool_response import ToolResponseParams - -FileEnvironmentResponseFileParams = typing.Union[ - PromptResponseParams, - ToolResponseParams, - DatasetResponseParams, - EvaluatorResponseParams, - FlowResponseParams, - AgentResponseParams, -] diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py deleted file mode 100644 index bb70bda4..00000000 --- a/src/humanloop/requests/file_environment_variable_request.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict): - name: str - """ - Name of the environment variable. - """ - - value: str - """ - Value of the environment variable. - """ diff --git a/src/humanloop/requests/file_id.py b/src/humanloop/requests/file_id.py deleted file mode 100644 index d6c39755..00000000 --- a/src/humanloop/requests/file_id.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class FileIdParams(typing_extensions.TypedDict): - """ - Specification of a File by its ID. 
- """ - - environment: typing_extensions.NotRequired[str] - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - id: str - """ - Unique identifier for the File. - """ diff --git a/src/humanloop/requests/file_path.py b/src/humanloop/requests/file_path.py deleted file mode 100644 index 238927d8..00000000 --- a/src/humanloop/requests/file_path.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class FilePathParams(typing_extensions.TypedDict): - """ - Specification of a File by its path. - """ - - environment: typing_extensions.NotRequired[str] - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - path: str - """ - Path identifying a File. Provide this to specify a File. - """ diff --git a/src/humanloop/requests/file_request.py b/src/humanloop/requests/file_request.py deleted file mode 100644 index 91e730d6..00000000 --- a/src/humanloop/requests/file_request.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class FileRequestParams(typing_extensions.TypedDict): - id: typing_extensions.NotRequired[str] - """ - ID for an existing File. - """ - - path: typing_extensions.NotRequired[str] - """ - Path of the File, including the name. This locates the File in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - """ diff --git a/src/humanloop/requests/flow_kernel_request.py b/src/humanloop/requests/flow_kernel_request.py deleted file mode 100644 index 0a2b7993..00000000 --- a/src/humanloop/requests/flow_kernel_request.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class FlowKernelRequestParams(typing_extensions.TypedDict): - attributes: typing.Dict[str, typing.Optional[typing.Any]] - """ - A key-value object identifying the Flow Version. - """ diff --git a/src/humanloop/requests/flow_log_response.py b/src/humanloop/requests/flow_log_response.py deleted file mode 100644 index 661fc301..00000000 --- a/src/humanloop/requests/flow_log_response.py +++ /dev/null @@ -1,161 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .chat_message import ChatMessageParams -from .flow_response import FlowResponseParams - -if typing.TYPE_CHECKING: - from .evaluator_log_response import EvaluatorLogResponseParams - from .log_response import LogResponseParams - - -class FlowLogResponseParams(typing_extensions.TypedDict): - """ - General request for creating a Log - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - List of chat messages that were used as an input to the Flow. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The output message returned by this Flow. - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. 
- """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the Flow Log. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] - """ - Logs nested under this Log in the Trace. 
- """ - - flow: FlowResponseParams - """ - Flow used to generate the Log. - """ diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py deleted file mode 100644 index 19087c61..00000000 --- a/src/humanloop/requests/flow_response.py +++ /dev/null @@ -1,109 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.user_response import UserResponse -from .environment_response import EnvironmentResponseParams -from .evaluator_aggregate import EvaluatorAggregateParams - -if typing.TYPE_CHECKING: - from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams - - -class FlowResponseParams(typing_extensions.TypedDict): - """ - Response model for a Flow. - """ - - path: str - """ - Path of the Flow, including the name, which is used as a unique identifier. - """ - - id: str - """ - Unique identifier for the Flow. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - attributes: typing.Dict[str, typing.Optional[typing.Any]] - """ - A key-value object identifying the Flow Version. - """ - - version_name: typing_extensions.NotRequired[str] - """ - Unique name for the Flow version. Version names must be unique for a given Flow. - """ - - version_description: typing_extensions.NotRequired[str] - """ - Description of the Version. - """ - - name: str - """ - Name of the Flow. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the Flow. - """ - - schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The JSON schema for the File. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the file. - """ - - tags: typing_extensions.NotRequired[typing.Sequence[str]] - """ - List of tags associated with the file. - """ - - version_id: str - """ - Unique identifier for the specific Flow Version. If no query params provided, the default deployed Flow Version is returned. - """ - - type: typing_extensions.NotRequired[typing.Literal["flow"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Flow Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Flow. - """ - - last_used_at: dt.datetime - version_logs_count: int - """ - The number of logs that have been generated for this Flow Version - """ - - evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] - """ - Aggregation of Evaluator results for the Flow Version. - """ - - evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] - """ - The list of Monitoring Evaluators associated with the Flow Version. - """ diff --git a/src/humanloop/requests/function_tool.py b/src/humanloop/requests/function_tool.py deleted file mode 100644 index 473b2b6e..00000000 --- a/src/humanloop/requests/function_tool.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class FunctionToolParams(typing_extensions.TypedDict): - """ - A function tool to be called by the model where user owns runtime. 
- """ - - name: str - arguments: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/function_tool_choice.py b/src/humanloop/requests/function_tool_choice.py deleted file mode 100644 index 4b1c6c47..00000000 --- a/src/humanloop/requests/function_tool_choice.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class FunctionToolChoiceParams(typing_extensions.TypedDict): - """ - A function tool to be called by the model where user owns runtime. - """ - - name: str diff --git a/src/humanloop/requests/http_validation_error.py b/src/humanloop/requests/http_validation_error.py deleted file mode 100644 index 7b0ed08f..00000000 --- a/src/humanloop/requests/http_validation_error.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .validation_error import ValidationErrorParams - - -class HttpValidationErrorParams(typing_extensions.TypedDict): - detail: typing_extensions.NotRequired[typing.Sequence[ValidationErrorParams]] diff --git a/src/humanloop/requests/human_evaluator_request.py b/src/humanloop/requests/human_evaluator_request.py deleted file mode 100644 index 9bd32e2d..00000000 --- a/src/humanloop/requests/human_evaluator_request.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.evaluator_arguments_type import EvaluatorArgumentsType -from ..types.human_evaluator_request_return_type import HumanEvaluatorRequestReturnType -from ..types.valence import Valence -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams - - -class HumanEvaluatorRequestParams(typing_extensions.TypedDict): - arguments_type: EvaluatorArgumentsType - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: HumanEvaluatorRequestReturnType - """ - The type of the return value of the Evaluator. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] - """ - The options that can be applied as judgments. - """ - - number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing_extensions.NotRequired[Valence] - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. - """ - - evaluator_type: typing.Literal["human"] - instructions: typing_extensions.NotRequired[str] - """ - Instructions and guidelines for applying judgments. - """ diff --git a/src/humanloop/requests/image_chat_content.py b/src/humanloop/requests/image_chat_content.py deleted file mode 100644 index 5dc1163e..00000000 --- a/src/humanloop/requests/image_chat_content.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import typing_extensions -from .image_url import ImageUrlParams - - -class ImageChatContentParams(typing_extensions.TypedDict): - type: typing.Literal["image_url"] - image_url: ImageUrlParams - """ - The message's image content. - """ diff --git a/src/humanloop/requests/image_url.py b/src/humanloop/requests/image_url.py deleted file mode 100644 index 9d2a671b..00000000 --- a/src/humanloop/requests/image_url.py +++ /dev/null @@ -1,16 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.image_url_detail import ImageUrlDetail - - -class ImageUrlParams(typing_extensions.TypedDict): - url: str - """ - Either a URL of the image or the base64 encoded image data. - """ - - detail: typing_extensions.NotRequired[ImageUrlDetail] - """ - Specify the detail level of the image provided to the model. For more details see: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding - """ diff --git a/src/humanloop/requests/input_response.py b/src/humanloop/requests/input_response.py deleted file mode 100644 index ffc4874c..00000000 --- a/src/humanloop/requests/input_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class InputResponseParams(typing_extensions.TypedDict): - name: str - """ - Type of input. - """ diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py deleted file mode 100644 index 58c44162..00000000 --- a/src/humanloop/requests/linked_file_request.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class LinkedFileRequestParams(typing_extensions.TypedDict): - file_id: str - environment_id: typing_extensions.NotRequired[str] - version_id: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/linked_tool_response.py b/src/humanloop/requests/linked_tool_response.py deleted file mode 100644 index 646549d9..00000000 --- a/src/humanloop/requests/linked_tool_response.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class LinkedToolResponseParams(typing_extensions.TypedDict): - name: str - """ - Name for the tool referenced by the model. - """ - - description: str - """ - Description of the tool referenced by the model - """ - - strict: typing_extensions.NotRequired[bool] - """ - If true, forces the model to output json data in the structure of the parameters schema. - """ - - parameters: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - id: str - """ - Unique identifier for the Tool linked. - """ - - version_id: str - """ - Unique identifier for the Tool Version linked. - """ diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py deleted file mode 100644 index 8e1d6b0e..00000000 --- a/src/humanloop/requests/list_agents.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .agent_response import AgentResponseParams - - -class ListAgentsParams(typing_extensions.TypedDict): - records: typing.Sequence[AgentResponseParams] - """ - The list of Agents. 
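Aside: `ImageChatContentParams` above wraps an `ImageUrlParams`, which accepts either an HTTP URL or base64-encoded image data plus an OpenAI-style detail level. A sketch; the URL and the `"low"` detail value are assumptions for illustration:

image_content = {
    "type": "image_url",
    "image_url": {
        "url": "https://example.com/chart.png",  # or base64-encoded image data
        "detail": "low",  # assumed ImageUrlDetail value; trades fidelity for cost
    },
}

# Image content slots into a chat message's content alongside text parts.
message = {"role": "user", "content": [image_content]}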
- """ diff --git a/src/humanloop/requests/list_datasets.py b/src/humanloop/requests/list_datasets.py deleted file mode 100644 index b49ea512..00000000 --- a/src/humanloop/requests/list_datasets.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .dataset_response import DatasetResponseParams - - -class ListDatasetsParams(typing_extensions.TypedDict): - records: typing.Sequence[DatasetResponseParams] - """ - The list of Datasets. - """ diff --git a/src/humanloop/requests/list_evaluators.py b/src/humanloop/requests/list_evaluators.py deleted file mode 100644 index 61d1aa46..00000000 --- a/src/humanloop/requests/list_evaluators.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .evaluator_response import EvaluatorResponseParams - - -class ListEvaluatorsParams(typing_extensions.TypedDict): - records: typing.Sequence[EvaluatorResponseParams] - """ - The list of Evaluators. - """ diff --git a/src/humanloop/requests/list_flows.py b/src/humanloop/requests/list_flows.py deleted file mode 100644 index 32b90142..00000000 --- a/src/humanloop/requests/list_flows.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .flow_response import FlowResponseParams - - -class ListFlowsParams(typing_extensions.TypedDict): - records: typing.Sequence[FlowResponseParams] - """ - The list of Flows. - """ diff --git a/src/humanloop/requests/list_prompts.py b/src/humanloop/requests/list_prompts.py deleted file mode 100644 index 717fd9eb..00000000 --- a/src/humanloop/requests/list_prompts.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .prompt_response import PromptResponseParams - - -class ListPromptsParams(typing_extensions.TypedDict): - records: typing.Sequence[PromptResponseParams] - """ - The list of Prompts. - """ diff --git a/src/humanloop/requests/list_tools.py b/src/humanloop/requests/list_tools.py deleted file mode 100644 index d12fe188..00000000 --- a/src/humanloop/requests/list_tools.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .tool_response import ToolResponseParams - - -class ListToolsParams(typing_extensions.TypedDict): - records: typing.Sequence[ToolResponseParams] - """ - The list of Tools. - """ diff --git a/src/humanloop/requests/llm_evaluator_request.py b/src/humanloop/requests/llm_evaluator_request.py deleted file mode 100644 index fd4c6d29..00000000 --- a/src/humanloop/requests/llm_evaluator_request.py +++ /dev/null @@ -1,49 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import typing_extensions -from ..types.evaluator_arguments_type import EvaluatorArgumentsType -from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum -from ..types.valence import Valence -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams -from .prompt_kernel_request import PromptKernelRequestParams - - -class LlmEvaluatorRequestParams(typing_extensions.TypedDict): - arguments_type: EvaluatorArgumentsType - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: EvaluatorReturnTypeEnum - """ - The type of the return value of the Evaluator. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] - """ - The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. - """ - - number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing_extensions.NotRequired[Valence] - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. - """ - - evaluator_type: typing.Literal["llm"] - prompt: typing_extensions.NotRequired[PromptKernelRequestParams] - """ - The prompt parameters used to generate. - """ diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py deleted file mode 100644 index 3a1f56a0..00000000 --- a/src/humanloop/requests/log_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -if typing.TYPE_CHECKING: - from .agent_log_response import AgentLogResponseParams - from .evaluator_log_response import EvaluatorLogResponseParams - from .flow_log_response import FlowLogResponseParams - from .prompt_log_response import PromptLogResponseParams - from .tool_log_response import ToolLogResponseParams -LogResponseParams = typing.Union[ - "PromptLogResponseParams", - "ToolLogResponseParams", - "EvaluatorLogResponseParams", - "FlowLogResponseParams", - "AgentLogResponseParams", -] diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py deleted file mode 100644 index 2a9b1952..00000000 --- a/src/humanloop/requests/log_stream_response.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .agent_log_stream_response import AgentLogStreamResponseParams -from .prompt_call_stream_response import PromptCallStreamResponseParams - -LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams] diff --git a/src/humanloop/requests/monitoring_evaluator_environment_request.py b/src/humanloop/requests/monitoring_evaluator_environment_request.py deleted file mode 100644 index b0505ada..00000000 --- a/src/humanloop/requests/monitoring_evaluator_environment_request.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class MonitoringEvaluatorEnvironmentRequestParams(typing_extensions.TypedDict): - evaluator_id: str - """ - Unique identifier for the Evaluator to be used for monitoring. - """ - - environment_id: str - """ - Unique identifier for the Environment. The Evaluator Version deployed to this Environment will be used for monitoring. - """ diff --git a/src/humanloop/requests/monitoring_evaluator_response.py b/src/humanloop/requests/monitoring_evaluator_response.py deleted file mode 100644 index c946fc65..00000000 --- a/src/humanloop/requests/monitoring_evaluator_response.py +++ /dev/null @@ -1,33 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.monitoring_evaluator_state import MonitoringEvaluatorState - -if typing.TYPE_CHECKING: - from .evaluator_response import EvaluatorResponseParams - from .version_reference_response import VersionReferenceResponseParams - - -class MonitoringEvaluatorResponseParams(typing_extensions.TypedDict): - version_reference: "VersionReferenceResponseParams" - """ - The Evaluator Version used for monitoring. This can be a specific Version by ID, or a Version deployed to an Environment. - """ - - version: typing_extensions.NotRequired["EvaluatorResponseParams"] - """ - The deployed Version. - """ - - state: MonitoringEvaluatorState - """ - The state of the Monitoring Evaluator. Either `active` or `inactive` - """ - - created_at: dt.datetime - updated_at: dt.datetime diff --git a/src/humanloop/requests/monitoring_evaluator_version_request.py b/src/humanloop/requests/monitoring_evaluator_version_request.py deleted file mode 100644 index aa37c3ea..00000000 --- a/src/humanloop/requests/monitoring_evaluator_version_request.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class MonitoringEvaluatorVersionRequestParams(typing_extensions.TypedDict): - evaluator_version_id: str - """ - Unique identifier for the Evaluator Version to be used for monitoring. - """ diff --git a/src/humanloop/requests/numeric_evaluator_stats_response.py b/src/humanloop/requests/numeric_evaluator_stats_response.py deleted file mode 100644 index a74784ce..00000000 --- a/src/humanloop/requests/numeric_evaluator_stats_response.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class NumericEvaluatorStatsResponseParams(typing_extensions.TypedDict): - """ - Base attributes for stats for an Evaluator Version-Evaluated Version pair - in the Evaluation. - """ - - evaluator_version_id: str - """ - Unique identifier for the Evaluator Version. 
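Aside: the two monitoring request shapes above differ only in how the Evaluator Version is resolved — pinned by Version ID, or tracked through an Environment deployment. A sketch with hypothetical IDs:

# MonitoringEvaluatorVersionRequestParams: pin one exact Evaluator Version;
# it never changes implicitly.
pinned = {"evaluator_version_id": "evv_0123456789"}  # illustrative ID

# MonitoringEvaluatorEnvironmentRequestParams: track whatever Version of the
# Evaluator is deployed to an Environment, so redeploying updates monitoring.
tracked = {
    "evaluator_id": "ev_0123456789",
    "environment_id": "env_0123456789",
}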
- """ - - total_logs: int - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. - """ - - num_nulls: int - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. - """ - - num_errors: int - """ - The total number of errored Evaluators for this Evaluator Version. - """ - - mean: typing_extensions.NotRequired[float] - sum: typing_extensions.NotRequired[float] - std: typing_extensions.NotRequired[float] - percentiles: typing.Dict[str, float] diff --git a/src/humanloop/requests/overall_stats.py b/src/humanloop/requests/overall_stats.py deleted file mode 100644 index fd42a922..00000000 --- a/src/humanloop/requests/overall_stats.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class OverallStatsParams(typing_extensions.TypedDict): - num_datapoints: int - """ - The total number of Datapoints in the Evaluation's Dataset Version. - """ - - total_logs: int - """ - The total number of Logs in the Evaluation. - """ - - total_evaluator_logs: int - """ - The total number of Evaluator Logs in the Evaluation. - """ - - total_human_evaluator_logs: int - """ - The total number of human Evaluator Logs in the Evaluation Report. - """ - - total_completed_human_evaluator_logs: int - """ - The total number of non-None human Evaluator Logs in the Evaluation Report. - """ diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py deleted file mode 100644 index af318b6a..00000000 --- a/src/humanloop/requests/paginated_data_agent_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .agent_response import AgentResponseParams - - -class PaginatedDataAgentResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[AgentResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_evaluation_log_response.py b/src/humanloop/requests/paginated_data_evaluation_log_response.py deleted file mode 100644 index 61439b55..00000000 --- a/src/humanloop/requests/paginated_data_evaluation_log_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .evaluation_log_response import EvaluationLogResponseParams - - -class PaginatedDataEvaluationLogResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[EvaluationLogResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_evaluator_response.py b/src/humanloop/requests/paginated_data_evaluator_response.py deleted file mode 100644 index 15294571..00000000 --- a/src/humanloop/requests/paginated_data_evaluator_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import typing_extensions -from .evaluator_response import EvaluatorResponseParams - - -class PaginatedDataEvaluatorResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[EvaluatorResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_flow_response.py b/src/humanloop/requests/paginated_data_flow_response.py deleted file mode 100644 index 51db6406..00000000 --- a/src/humanloop/requests/paginated_data_flow_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .flow_response import FlowResponseParams - - -class PaginatedDataFlowResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[FlowResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_log_response.py b/src/humanloop/requests/paginated_data_log_response.py deleted file mode 100644 index 450f2d0e..00000000 --- a/src/humanloop/requests/paginated_data_log_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .log_response import LogResponseParams - - -class PaginatedDataLogResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[LogResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_prompt_response.py b/src/humanloop/requests/paginated_data_prompt_response.py deleted file mode 100644 index 62eae52b..00000000 --- a/src/humanloop/requests/paginated_data_prompt_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .prompt_response import PromptResponseParams - - -class PaginatedDataPromptResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[PromptResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_tool_response.py b/src/humanloop/requests/paginated_data_tool_response.py deleted file mode 100644 index 41eaf15a..00000000 --- a/src/humanloop/requests/paginated_data_tool_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .tool_response import ToolResponseParams - - -class PaginatedDataToolResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[ToolResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py deleted file mode 100644 index 5bde00b9..00000000 --- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import typing_extensions -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, -) - - -class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams( - typing_extensions.TypedDict -): - records: typing.Sequence[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams - ] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py deleted file mode 100644 index 51db2493..00000000 --- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_response import AgentResponseParams -from .dataset_response import DatasetResponseParams -from .evaluator_response import EvaluatorResponseParams -from .flow_response import FlowResponseParams -from .prompt_response import PromptResponseParams -from .tool_response import ToolResponseParams - -PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[ - PromptResponseParams, - ToolResponseParams, - DatasetResponseParams, - EvaluatorResponseParams, - FlowResponseParams, - AgentResponseParams, -] diff --git a/src/humanloop/requests/paginated_datapoint_response.py b/src/humanloop/requests/paginated_datapoint_response.py deleted file mode 100644 index 5ef2bae4..00000000 --- a/src/humanloop/requests/paginated_datapoint_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .datapoint_response import DatapointResponseParams - - -class PaginatedDatapointResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[DatapointResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_dataset_response.py b/src/humanloop/requests/paginated_dataset_response.py deleted file mode 100644 index ea5cd5b1..00000000 --- a/src/humanloop/requests/paginated_dataset_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .dataset_response import DatasetResponseParams - - -class PaginatedDatasetResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[DatasetResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/paginated_evaluation_response.py b/src/humanloop/requests/paginated_evaluation_response.py deleted file mode 100644 index 30916a81..00000000 --- a/src/humanloop/requests/paginated_evaluation_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
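Aside: all the `PaginatedData*Params` shapes in this stretch of the diff share one envelope — `records`, `page`, `size`, `total` — so a single generic walker covers any of them. A sketch, assuming a caller-supplied `fetch_page(page)` that returns such an envelope:

import typing

def iter_records(fetch_page: typing.Callable[[int], dict]) -> typing.Iterator[dict]:
    """Yield records from a records/page/size/total envelope, page by page."""
    page = 1
    while True:
        envelope = fetch_page(page)
        yield from envelope["records"]
        # Stop once the pages seen so far cover the reported total.
        if page * envelope["size"] >= envelope["total"]:
            return
        page += 1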
- -import typing - -import typing_extensions -from .evaluation_response import EvaluationResponseParams - - -class PaginatedEvaluationResponseParams(typing_extensions.TypedDict): - records: typing.Sequence[EvaluationResponseParams] - page: int - size: int - total: int diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py deleted file mode 100644 index a6ed2b2f..00000000 --- a/src/humanloop/requests/populate_template_response.py +++ /dev/null @@ -1,229 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.template_language import TemplateLanguage -from ..types.user_response import UserResponse -from .environment_response import EnvironmentResponseParams -from .evaluator_aggregate import EvaluatorAggregateParams -from .input_response import InputResponseParams -from .linked_tool_response import LinkedToolResponseParams -from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams -from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams -from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams -from .populate_template_response_stop import PopulateTemplateResponseStopParams -from .populate_template_response_template import PopulateTemplateResponseTemplateParams -from .response_format import ResponseFormatParams -from .tool_function import ToolFunctionParams - - -class PopulateTemplateResponseParams(typing_extensions.TypedDict): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str - """ - Path of the Prompt, including the name, which is used as a unique identifier. - """ - - id: str - """ - Unique identifier for the Prompt. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - model: str - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing_extensions.NotRequired[ModelEndpoints] - """ - The provider model endpoint used. - """ - - template: typing_extensions.NotRequired[PopulateTemplateResponseTemplateParams] - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing_extensions.NotRequired[TemplateLanguage] - """ - The template language to use for rendering the template. - """ - - provider: typing_extensions.NotRequired[ModelProviders] - """ - The company providing the underlying model service. - """ - - max_tokens: typing_extensions.NotRequired[int] - """ - The maximum number of tokens to generate. 
Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
-    """
-
-    temperature: typing_extensions.NotRequired[float]
-    """
-    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
-    """
-
-    top_p: typing_extensions.NotRequired[float]
-    """
-    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
-    """
-
-    stop: typing_extensions.NotRequired[PopulateTemplateResponseStopParams]
-    """
-    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
-    """
-
-    presence_penalty: typing_extensions.NotRequired[float]
-    """
-    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
-    """
-
-    frequency_penalty: typing_extensions.NotRequired[float]
-    """
-    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
-    """
-
-    other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
-    """
-    Other parameter values to be passed to the provider call.
-    """
-
-    seed: typing_extensions.NotRequired[int]
-    """
-    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
-    """
-
-    response_format: typing_extensions.NotRequired[ResponseFormatParams]
-    """
-    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
-    """
-
-    reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams]
-    """
-    Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
-    """
-
-    tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
-    """
-    The tool specification that the model can choose to call if Tool calling is supported.
-    """
-
-    linked_tools: typing_extensions.NotRequired[typing.Sequence[LinkedToolResponseParams]]
-    """
-    The tools linked to your prompt that the model can call.
-    """
-
-    attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
-    """
-    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
-    """
-
-    version_name: typing_extensions.NotRequired[str]
-    """
-    Unique name for the Prompt version. Version names must be unique for a given Prompt.
-    """
-
-    version_description: typing_extensions.NotRequired[str]
-    """
-    Description of the version, e.g., the changes made in this version.
-    """
-
-    description: typing_extensions.NotRequired[str]
-    """
-    Description of the Prompt.
-    """
-
-    tags: typing_extensions.NotRequired[typing.Sequence[str]]
-    """
-    List of tags associated with the file.
-    """
-
-    readme: typing_extensions.NotRequired[str]
-    """
-    Long description of the file.
-    """
-
-    name: str
-    """
-    Name of the Prompt.
-    """
-
-    schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
-    """
-    The JSON schema for the Prompt.
-    """
-
-    version_id: str
-    """
-    Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
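Aside: `reasoning_effort` above is a union precisely because providers disagree on its type — OpenAI reasoning models take a coarse effort enum, while Anthropic reasoning models take an integer token budget. A sketch; the model names are illustrative only:

# OpenAI reasoning models (o1, o3-mini): a coarse effort level.
openai_style = {"model": "o3-mini", "reasoning_effort": "medium"}

# Anthropic reasoning models: an integer maximum token budget instead.
anthropic_style = {"model": "claude-3-7-sonnet-latest", "reasoning_effort": 4096}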
- """ - - type: typing_extensions.NotRequired[typing.Literal["prompt"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Prompt Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Prompt. - """ - - last_used_at: dt.datetime - version_logs_count: int - """ - The number of logs that have been generated for this Prompt Version - """ - - total_logs_count: int - """ - The number of logs that have been generated across all Prompt Versions - """ - - inputs: typing.Sequence[InputResponseParams] - """ - Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template. - """ - - evaluators: typing_extensions.NotRequired[typing.Sequence[MonitoringEvaluatorResponseParams]] - """ - Evaluators that have been attached to this Prompt that are used for monitoring logs. - """ - - evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] - """ - Aggregation of Evaluator results for the Prompt Version. - """ - - raw_file_content: typing_extensions.NotRequired[str] - """ - The raw content of the Prompt. Corresponds to the .prompt file. - """ - - populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams] - """ - The template populated with the input values you provided in the request. Returns None if no template exists. - """ diff --git a/src/humanloop/requests/populate_template_response_populated_template.py b/src/humanloop/requests/populate_template_response_populated_template.py deleted file mode 100644 index 79bc7505..00000000 --- a/src/humanloop/requests/populate_template_response_populated_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessageParams - -PopulateTemplateResponsePopulatedTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py deleted file mode 100644 index 9140180f..00000000 --- a/src/humanloop/requests/populate_template_response_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort - -PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/populate_template_response_stop.py b/src/humanloop/requests/populate_template_response_stop.py deleted file mode 100644 index d4f19110..00000000 --- a/src/humanloop/requests/populate_template_response_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PopulateTemplateResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/populate_template_response_template.py b/src/humanloop/requests/populate_template_response_template.py deleted file mode 100644 index 7a9ba8e9..00000000 --- a/src/humanloop/requests/populate_template_response_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
-
-import typing
-
-from .chat_message import ChatMessageParams
-
-PopulateTemplateResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/prompt_call_log_response.py b/src/humanloop/requests/prompt_call_log_response.py
deleted file mode 100644
index 4dff347b..00000000
--- a/src/humanloop/requests/prompt_call_log_response.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-
-import typing_extensions
-from .chat_message import ChatMessageParams
-
-
-class PromptCallLogResponseParams(typing_extensions.TypedDict):
-    """
-    Sample-specific response details for a Prompt call
-    """
-
-    output: typing_extensions.NotRequired[str]
-    """
-    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-    """
-
-    created_at: typing_extensions.NotRequired[dt.datetime]
-    """
-    User defined timestamp for when the log was created.
-    """
-
-    error: typing_extensions.NotRequired[str]
-    """
-    Error message if the log is an error.
-    """
-
-    provider_latency: typing_extensions.NotRequired[float]
-    """
-    Duration of the logged event in seconds.
-    """
-
-    stdout: typing_extensions.NotRequired[str]
-    """
-    Captured log and debug statements.
-    """
-
-    output_message: typing_extensions.NotRequired[ChatMessageParams]
-    """
-    The message returned by the provider.
-    """
-
-    prompt_tokens: typing_extensions.NotRequired[int]
-    """
-    Number of tokens in the prompt used to generate the output.
-    """
-
-    reasoning_tokens: typing_extensions.NotRequired[int]
-    """
-    Number of reasoning tokens used to generate the output.
-    """
-
-    output_tokens: typing_extensions.NotRequired[int]
-    """
-    Number of tokens in the output generated by the model.
-    """
-
-    prompt_cost: typing_extensions.NotRequired[float]
-    """
-    Cost in dollars associated to the tokens in the prompt.
-    """
-
-    output_cost: typing_extensions.NotRequired[float]
-    """
-    Cost in dollars associated to the tokens in the output.
-    """
-
-    finish_reason: typing_extensions.NotRequired[str]
-    """
-    Reason the generation finished.
-    """
-
-    index: int
-    """
-    The index of the sample in the batch.
-    """
diff --git a/src/humanloop/requests/prompt_call_response.py b/src/humanloop/requests/prompt_call_response.py
deleted file mode 100644
index 14ff4609..00000000
--- a/src/humanloop/requests/prompt_call_response.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-import typing_extensions
-from ..types.log_status import LogStatus
-from .chat_message import ChatMessageParams
-from .prompt_call_log_response import PromptCallLogResponseParams
-from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
-from .prompt_response import PromptResponseParams
-
-
-class PromptCallResponseParams(typing_extensions.TypedDict):
-    """
-    Response model for a Prompt call with potentially multiple log samples.
-    """
-
-    start_time: typing_extensions.NotRequired[dt.datetime]
-    """
-    When the logged event started.
-    """
-
-    end_time: typing_extensions.NotRequired[dt.datetime]
-    """
-    When the logged event ended.
-    """
-
-    messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
-    """
-    The messages passed to the provider chat endpoint.
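Aside: each entry in a Prompt call's `logs` carries its own token and dollar accounting (`prompt_cost`, `output_cost`, both optional), so a call's total spend is a null-safe sum over samples. A minimal sketch over plain dicts matching the shape above:

import typing

def total_cost(logs: typing.Sequence[dict]) -> float:
    """Sum per-sample prompt and output costs, treating missing fields as zero."""
    return sum(
        (log.get("prompt_cost") or 0.0) + (log.get("output_cost") or 0.0)
        for log in logs
    )

# e.g. spend = total_cost(call_response["logs"])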
- """ - - tool_choice: typing_extensions.NotRequired[PromptCallResponseToolChoiceParams] - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - prompt: PromptResponseParams - """ - Prompt used to generate the Log. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - ID of the log. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - ID of the Trace containing the Prompt Call Log. - """ - - logs: typing.Sequence[PromptCallLogResponseParams] - """ - The logs generated by the Prompt call. - """ diff --git a/src/humanloop/requests/prompt_call_response_tool_choice.py b/src/humanloop/requests/prompt_call_response_tool_choice.py deleted file mode 100644 index 63fd7183..00000000 --- a/src/humanloop/requests/prompt_call_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .tool_choice import ToolChoiceParams - -PromptCallResponseToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/requests/prompt_call_stream_response.py b/src/humanloop/requests/prompt_call_stream_response.py deleted file mode 100644 index 9d3e5651..00000000 --- a/src/humanloop/requests/prompt_call_stream_response.py +++ /dev/null @@ -1,92 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt - -import typing_extensions -from .chat_message import ChatMessageParams - - -class PromptCallStreamResponseParams(typing_extensions.TypedDict): - """ - Response model for calling Prompt in streaming mode. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the provider. - """ - - prompt_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing_extensions.NotRequired[int] - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing_extensions.NotRequired[str] - """ - Reason the generation finished. - """ - - index: int - """ - The index of the sample in the batch. - """ - - id: str - """ - ID of the log. - """ - - prompt_id: str - """ - ID of the Prompt the log belongs to. - """ - - version_id: str - """ - ID of the specific version of the Prompt. - """ diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py deleted file mode 100644 index 48d8db46..00000000 --- a/src/humanloop/requests/prompt_kernel_request.py +++ /dev/null @@ -1,116 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.template_language import TemplateLanguage -from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams -from .prompt_kernel_request_stop import PromptKernelRequestStopParams -from .prompt_kernel_request_template import PromptKernelRequestTemplateParams -from .response_format import ResponseFormatParams -from .tool_function import ToolFunctionParams - - -class PromptKernelRequestParams(typing_extensions.TypedDict): - """ - Base class used by both PromptKernelRequest and AgentKernelRequest. 
- - Contains the consistent Prompt-related fields. - """ - - model: str - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing_extensions.NotRequired[ModelEndpoints] - """ - The provider model endpoint used. - """ - - template: typing_extensions.NotRequired[PromptKernelRequestTemplateParams] - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing_extensions.NotRequired[TemplateLanguage] - """ - The template language to use for rendering the template. - """ - - provider: typing_extensions.NotRequired[ModelProviders] - """ - The company providing the underlying model service. - """ - - max_tokens: typing_extensions.NotRequired[int] - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - """ - - temperature: typing_extensions.NotRequired[float] - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - """ - - top_p: typing_extensions.NotRequired[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing_extensions.NotRequired[PromptKernelRequestStopParams] - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing_extensions.NotRequired[int] - """ - If specified, model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing_extensions.NotRequired[ResponseFormatParams] - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams] - """ - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]] - """ - The tool specification that the model can choose to call if Tool calling is supported.
- """ - - linked_tools: typing_extensions.NotRequired[typing.Sequence[str]] - """ - The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py deleted file mode 100644 index 81df2957..00000000 --- a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort - -PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/prompt_kernel_request_stop.py b/src/humanloop/requests/prompt_kernel_request_stop.py deleted file mode 100644 index c3db9e58..00000000 --- a/src/humanloop/requests/prompt_kernel_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PromptKernelRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/prompt_kernel_request_template.py b/src/humanloop/requests/prompt_kernel_request_template.py deleted file mode 100644 index aa389a04..00000000 --- a/src/humanloop/requests/prompt_kernel_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessageParams - -PromptKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/prompt_log_response.py b/src/humanloop/requests/prompt_log_response.py deleted file mode 100644 index 6147adec..00000000 --- a/src/humanloop/requests/prompt_log_response.py +++ /dev/null @@ -1,201 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .chat_message import ChatMessageParams -from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams -from .prompt_response import PromptResponseParams - -if typing.TYPE_CHECKING: - from .evaluator_log_response import EvaluatorLogResponseParams - from .log_response import LogResponseParams - - -class PromptLogResponseParams(typing_extensions.TypedDict): - """ - General request for creating a Log - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the provider. - """ - - prompt_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing_extensions.NotRequired[int] - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing_extensions.NotRequired[int] - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the prompt. 
- """ - - output_cost: typing_extensions.NotRequired[float] - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing_extensions.NotRequired[str] - """ - Reason the generation finished. - """ - - messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing_extensions.NotRequired[PromptLogResponseToolChoiceParams] - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - prompt: PromptResponseParams - """ - Prompt used to generate the Log. - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. 
- """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] - """ - Logs nested under this Log in the Trace. - """ diff --git a/src/humanloop/requests/prompt_log_response_tool_choice.py b/src/humanloop/requests/prompt_log_response_tool_choice.py deleted file mode 100644 index 8e8ad6dd..00000000 --- a/src/humanloop/requests/prompt_log_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoiceParams - -PromptLogResponseToolChoiceParams = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams -] diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py deleted file mode 100644 index 7a1b4493..00000000 --- a/src/humanloop/requests/prompt_response.py +++ /dev/null @@ -1,227 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.model_endpoints import ModelEndpoints -from ..types.model_providers import ModelProviders -from ..types.template_language import TemplateLanguage -from ..types.user_response import UserResponse -from .environment_response import EnvironmentResponseParams -from .evaluator_aggregate import EvaluatorAggregateParams -from .input_response import InputResponseParams -from .linked_tool_response import LinkedToolResponseParams -from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams -from .prompt_response_stop import PromptResponseStopParams -from .prompt_response_template import PromptResponseTemplateParams -from .response_format import ResponseFormatParams -from .tool_function import ToolFunctionParams - -if typing.TYPE_CHECKING: - from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams - - -class PromptResponseParams(typing_extensions.TypedDict): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str - """ - Path of the Prompt, including the name, which is used as a unique identifier. 
- """ - - id: str - """ - Unique identifier for the Prompt. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - model: str - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing_extensions.NotRequired[ModelEndpoints] - """ - The provider model endpoint used. - """ - - template: typing_extensions.NotRequired[PromptResponseTemplateParams] - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing_extensions.NotRequired[TemplateLanguage] - """ - The template language to use for rendering the template. - """ - - provider: typing_extensions.NotRequired[ModelProviders] - """ - The company providing the underlying model service. - """ - - max_tokens: typing_extensions.NotRequired[int] - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - """ - - temperature: typing_extensions.NotRequired[float] - """ - What sampling temperature to use when making a generation. Higher values means the model will be more creative. - """ - - top_p: typing_extensions.NotRequired[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing_extensions.NotRequired[PromptResponseStopParams] - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing_extensions.NotRequired[float] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing_extensions.NotRequired[int] - """ - If specified, model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing_extensions.NotRequired[ResponseFormatParams] - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams] - """ - Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]] - """ - The tool specification that the model can choose to call if Tool calling is supported. 
- """ - - linked_tools: typing_extensions.NotRequired[typing.Sequence[LinkedToolResponseParams]] - """ - The tools linked to your prompt that the model can call. - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ - - version_name: typing_extensions.NotRequired[str] - """ - Unique name for the Prompt version. Version names must be unique for a given Prompt. - """ - - version_description: typing_extensions.NotRequired[str] - """ - Description of the version, e.g., the changes made in this version. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the Prompt. - """ - - tags: typing_extensions.NotRequired[typing.Sequence[str]] - """ - List of tags associated with the file. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the file. - """ - - name: str - """ - Name of the Prompt. - """ - - schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The JSON schema for the Prompt. - """ - - version_id: str - """ - Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. - """ - - type: typing_extensions.NotRequired[typing.Literal["prompt"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Prompt Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Prompt. - """ - - last_used_at: dt.datetime - version_logs_count: int - """ - The number of logs that have been generated for this Prompt Version - """ - - total_logs_count: int - """ - The number of logs that have been generated across all Prompt Versions - """ - - inputs: typing.Sequence[InputResponseParams] - """ - Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template. - """ - - evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] - """ - Evaluators that have been attached to this Prompt that are used for monitoring logs. - """ - - evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] - """ - Aggregation of Evaluator results for the Prompt Version. - """ - - raw_file_content: typing_extensions.NotRequired[str] - """ - The raw content of the Prompt. Corresponds to the .prompt file. - """ diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py deleted file mode 100644 index 55d82486..00000000 --- a/src/humanloop/requests/prompt_response_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort - -PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/prompt_response_stop.py b/src/humanloop/requests/prompt_response_stop.py deleted file mode 100644 index c1545617..00000000 --- a/src/humanloop/requests/prompt_response_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -PromptResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/prompt_response_template.py b/src/humanloop/requests/prompt_response_template.py deleted file mode 100644 index b9f6deb4..00000000 --- a/src/humanloop/requests/prompt_response_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessageParams - -PromptResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/provider_api_keys.py b/src/humanloop/requests/provider_api_keys.py deleted file mode 100644 index c37649ea..00000000 --- a/src/humanloop/requests/provider_api_keys.py +++ /dev/null @@ -1,16 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..core.serialization import FieldMetadata - - -class ProviderApiKeysParams(typing_extensions.TypedDict): - openai: typing_extensions.NotRequired[str] - ai_21: typing_extensions.NotRequired[typing_extensions.Annotated[str, FieldMetadata(alias="ai21")]] - mock: typing_extensions.NotRequired[str] - anthropic: typing_extensions.NotRequired[str] - deepseek: typing_extensions.NotRequired[str] - bedrock: typing_extensions.NotRequired[str] - cohere: typing_extensions.NotRequired[str] - openai_azure: typing_extensions.NotRequired[str] - openai_azure_endpoint: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/response_format.py b/src/humanloop/requests/response_format.py deleted file mode 100644 index 1fce8531..00000000 --- a/src/humanloop/requests/response_format.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.response_format_type import ResponseFormatType - - -class ResponseFormatParams(typing_extensions.TypedDict): - """ - Response format of the model. - """ - - type: ResponseFormatType - json_schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The JSON schema of the response format if type is json_schema. - """ diff --git a/src/humanloop/requests/run_stats_response.py b/src/humanloop/requests/run_stats_response.py deleted file mode 100644 index 6bdbf08e..00000000 --- a/src/humanloop/requests/run_stats_response.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from ..types.evaluation_status import EvaluationStatus -from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams - - -class RunStatsResponseParams(typing_extensions.TypedDict): - """ - Stats for a Run in the Evaluation. - """ - - run_id: str - """ - Unique identifier for the Run. - """ - - version_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the evaluated Version. - """ - - batch_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the batch of Logs to include in the Evaluation. - """ - - num_logs: int - """ - The total number of existing Logs in this Run. - """ - - evaluator_stats: typing.Sequence[RunStatsResponseEvaluatorStatsItemParams] - """ - Stats for each Evaluator Version applied to this Run. - """ - - status: EvaluationStatus - """ - The current status of the Run. 
- """ diff --git a/src/humanloop/requests/run_stats_response_evaluator_stats_item.py b/src/humanloop/requests/run_stats_response_evaluator_stats_item.py deleted file mode 100644 index 09231c9b..00000000 --- a/src/humanloop/requests/run_stats_response_evaluator_stats_item.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams -from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams -from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams -from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams - -RunStatsResponseEvaluatorStatsItemParams = typing.Union[ - NumericEvaluatorStatsResponseParams, - BooleanEvaluatorStatsResponseParams, - SelectEvaluatorStatsResponseParams, - TextEvaluatorStatsResponseParams, -] diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py deleted file mode 100644 index 3091de87..00000000 --- a/src/humanloop/requests/run_version_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_response import AgentResponseParams -from .evaluator_response import EvaluatorResponseParams -from .flow_response import FlowResponseParams -from .prompt_response import PromptResponseParams -from .tool_response import ToolResponseParams - -RunVersionResponseParams = typing.Union[ - PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams -] diff --git a/src/humanloop/requests/select_evaluator_stats_response.py b/src/humanloop/requests/select_evaluator_stats_response.py deleted file mode 100644 index 7c77198a..00000000 --- a/src/humanloop/requests/select_evaluator_stats_response.py +++ /dev/null @@ -1,41 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class SelectEvaluatorStatsResponseParams(typing_extensions.TypedDict): - """ - Also used for 'multi_select' Evaluator versions - """ - - evaluator_version_id: str - """ - Unique identifier for the Evaluator Version. - """ - - total_logs: int - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. - """ - - num_nulls: int - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. - """ - - num_errors: int - """ - The total number of errored Evaluators for this Evaluator Version. - """ - - num_judgments_per_option: typing.Dict[str, int] - """ - The total number of Evaluator judgments for this Evaluator Version. This is a mapping of the option name to the number of judgments for that option. - """ diff --git a/src/humanloop/requests/text_chat_content.py b/src/humanloop/requests/text_chat_content.py deleted file mode 100644 index fa9f5437..00000000 --- a/src/humanloop/requests/text_chat_content.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class TextChatContentParams(typing_extensions.TypedDict): - type: typing.Literal["text"] - text: str - """ - The message's text content. 
- """ diff --git a/src/humanloop/requests/text_evaluator_stats_response.py b/src/humanloop/requests/text_evaluator_stats_response.py deleted file mode 100644 index 8f0f358d..00000000 --- a/src/humanloop/requests/text_evaluator_stats_response.py +++ /dev/null @@ -1,35 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class TextEvaluatorStatsResponseParams(typing_extensions.TypedDict): - """ - Base attributes for stats for an Evaluator Version-Evaluated Version pair - in the Evaluation. - """ - - evaluator_version_id: str - """ - Unique identifier for the Evaluator Version. - """ - - total_logs: int - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. - """ - - num_nulls: int - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. - """ - - num_errors: int - """ - The total number of errored Evaluators for this Evaluator Version. - """ diff --git a/src/humanloop/requests/tool_call.py b/src/humanloop/requests/tool_call.py deleted file mode 100644 index d491b49b..00000000 --- a/src/humanloop/requests/tool_call.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.chat_tool_type import ChatToolType -from .function_tool import FunctionToolParams - - -class ToolCallParams(typing_extensions.TypedDict): - """ - A tool call to be made. - """ - - id: str - type: ChatToolType - function: FunctionToolParams diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py deleted file mode 100644 index e00069de..00000000 --- a/src/humanloop/requests/tool_call_response.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .evaluator_log_response import EvaluatorLogResponseParams -from .log_response import LogResponseParams -from .tool_response import ToolResponseParams - - -class ToolCallResponseParams(typing_extensions.TypedDict): - """ - Response model for a Tool call. - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - tool: ToolResponseParams - """ - Tool used to generate the Log. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. - """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. 
- """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - ID of the log. - """ - - evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - ID of the Trace containing the Tool Call Log. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] - """ - Logs nested under this Log in the Trace. - """ diff --git a/src/humanloop/requests/tool_choice.py b/src/humanloop/requests/tool_choice.py deleted file mode 100644 index 22ab3251..00000000 --- a/src/humanloop/requests/tool_choice.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.chat_tool_type import ChatToolType -from .function_tool_choice import FunctionToolChoiceParams - - -class ToolChoiceParams(typing_extensions.TypedDict): - """ - Tool choice to force the model to use a tool. 
- """ - - type: ChatToolType - function: FunctionToolChoiceParams diff --git a/src/humanloop/requests/tool_function.py b/src/humanloop/requests/tool_function.py deleted file mode 100644 index 9132b10e..00000000 --- a/src/humanloop/requests/tool_function.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class ToolFunctionParams(typing_extensions.TypedDict): - name: str - """ - Name for the tool referenced by the model. - """ - - description: str - """ - Description of the tool referenced by the model - """ - - strict: typing_extensions.NotRequired[bool] - """ - If true, forces the model to output json data in the structure of the parameters schema. - """ - - parameters: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ - """ diff --git a/src/humanloop/requests/tool_kernel_request.py b/src/humanloop/requests/tool_kernel_request.py deleted file mode 100644 index 48f8f5b1..00000000 --- a/src/humanloop/requests/tool_kernel_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .tool_function import ToolFunctionParams - - -class ToolKernelRequestParams(typing_extensions.TypedDict): - function: typing_extensions.NotRequired[ToolFunctionParams] - """ - Callable function specification of the Tool shown to the model for tool calling. - """ - - source_code: typing_extensions.NotRequired[str] - """ - Code source of the Tool. - """ - - setup_values: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - """ diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py deleted file mode 100644 index f4be5ad0..00000000 --- a/src/humanloop/requests/tool_log_response.py +++ /dev/null @@ -1,156 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.log_status import LogStatus -from .chat_message import ChatMessageParams -from .tool_response import ToolResponseParams - -if typing.TYPE_CHECKING: - from .evaluator_log_response import EvaluatorLogResponseParams - from .log_response import LogResponseParams - - -class ToolLogResponseParams(typing_extensions.TypedDict): - """ - General request for creating a Log - """ - - start_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event started. - """ - - end_time: typing_extensions.NotRequired[dt.datetime] - """ - When the logged event ended. - """ - - output: typing_extensions.NotRequired[str] - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing_extensions.NotRequired[dt.datetime] - """ - User defined timestamp for when the log was created. - """ - - error: typing_extensions.NotRequired[str] - """ - Error message if the log is an error. 
- """ - - provider_latency: typing_extensions.NotRequired[float] - """ - Duration of the logged event in seconds. - """ - - stdout: typing_extensions.NotRequired[str] - """ - Captured log and debug statements. - """ - - provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw request sent to provider. - """ - - provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Raw response received the provider. - """ - - inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - The inputs passed to the prompt template. - """ - - source: typing_extensions.NotRequired[str] - """ - Identifies where the model was called from. - """ - - metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Any additional metadata to record. - """ - - log_status: typing_extensions.NotRequired[LogStatus] - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing_extensions.NotRequired[str] - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing_extensions.NotRequired[str] - """ - End-user ID related to the Log. - """ - - environment: typing_extensions.NotRequired[str] - """ - The name of the Environment the Log is associated to. - """ - - save: typing_extensions.NotRequired[bool] - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing_extensions.NotRequired[str] - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing_extensions.NotRequired[str] - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing_extensions.NotRequired[str] - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] - """ - Logs nested under this Log in the Trace. - """ - - tool: ToolResponseParams - """ - Tool used to generate the Log. - """ - - output_message: typing_extensions.NotRequired[ChatMessageParams] - """ - The message returned by the Tool. - """ diff --git a/src/humanloop/requests/tool_response.py b/src/humanloop/requests/tool_response.py deleted file mode 100644 index ea4ab1df..00000000 --- a/src/humanloop/requests/tool_response.py +++ /dev/null @@ -1,145 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import datetime as dt -import typing - -import typing_extensions -from ..types.files_tool_type import FilesToolType -from ..types.user_response import UserResponse -from .environment_response import EnvironmentResponseParams -from .evaluator_aggregate import EvaluatorAggregateParams -from .input_response import InputResponseParams -from .tool_function import ToolFunctionParams - -if typing.TYPE_CHECKING: - from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams - - -class ToolResponseParams(typing_extensions.TypedDict): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str - """ - Path of the Tool, including the name, which is used as a unique identifier. - """ - - id: str - """ - Unique identifier for the Tool. - """ - - directory_id: typing_extensions.NotRequired[str] - """ - ID of the directory that the file is in on Humanloop. - """ - - function: typing_extensions.NotRequired[ToolFunctionParams] - """ - Callable function specification of the Tool shown to the model for tool calling. - """ - - source_code: typing_extensions.NotRequired[str] - """ - Code source of the Tool. - """ - - setup_values: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] - """ - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - """ - - tool_type: typing_extensions.NotRequired[FilesToolType] - """ - Type of Tool. - """ - - version_name: typing_extensions.NotRequired[str] - """ - Unique identifier for this Tool version. Each Tool can only have one version with a given name. - """ - - version_description: typing_extensions.NotRequired[str] - """ - Description of the Version. - """ - - name: str - """ - Name of the Tool, which is used as a unique identifier. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the Tool. - """ - - readme: typing_extensions.NotRequired[str] - """ - Long description of the file. - """ - - tags: typing_extensions.NotRequired[typing.Sequence[str]] - """ - List of tags associated with the file. - """ - - version_id: str - """ - Unique identifier for the specific Tool Version. If no query params provided, the default deployed Tool Version is returned. - """ - - type: typing_extensions.NotRequired[typing.Literal["tool"]] - environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] - """ - The list of environments the Tool Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing_extensions.NotRequired[UserResponse] - """ - The user who created the Tool. - """ - - last_used_at: dt.datetime - version_logs_count: int - """ - The number of logs that have been generated for this Tool Version - """ - - total_logs_count: int - """ - The number of logs that have been generated across all Tool Versions - """ - - inputs: typing.Sequence[InputResponseParams] - """ - Inputs associated to the Tool. Inputs correspond to any of the variables used within the Tool template.
- """ - - evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] - """ - Evaluators that have been attached to this Tool that are used for monitoring logs. - """ - - signature: typing_extensions.NotRequired[str] - """ - Signature of the Tool. - """ - - evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] - """ - Aggregation of Evaluator results for the Tool Version. - """ diff --git a/src/humanloop/requests/update_version_request.py b/src/humanloop/requests/update_version_request.py deleted file mode 100644 index 204b3b37..00000000 --- a/src/humanloop/requests/update_version_request.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class UpdateVersionRequestParams(typing_extensions.TypedDict): - name: typing_extensions.NotRequired[str] - """ - Name of the version. - """ - - description: typing_extensions.NotRequired[str] - """ - Description of the version. - """ diff --git a/src/humanloop/requests/validation_error.py b/src/humanloop/requests/validation_error.py deleted file mode 100644 index fba151d8..00000000 --- a/src/humanloop/requests/validation_error.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .validation_error_loc_item import ValidationErrorLocItemParams - - -class ValidationErrorParams(typing_extensions.TypedDict): - loc: typing.Sequence[ValidationErrorLocItemParams] - msg: str - type: str diff --git a/src/humanloop/requests/validation_error_loc_item.py b/src/humanloop/requests/validation_error_loc_item.py deleted file mode 100644 index b6ab5a3d..00000000 --- a/src/humanloop/requests/validation_error_loc_item.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ValidationErrorLocItemParams = typing.Union[str, int] diff --git a/src/humanloop/requests/version_deployment_response.py b/src/humanloop/requests/version_deployment_response.py deleted file mode 100644 index fdd17544..00000000 --- a/src/humanloop/requests/version_deployment_response.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import typing_extensions -from .environment_response import EnvironmentResponseParams - -if typing.TYPE_CHECKING: - from .version_deployment_response_file import VersionDeploymentResponseFileParams - - -class VersionDeploymentResponseParams(typing_extensions.TypedDict): - """ - A variable reference to the Version deployed to an Environment - """ - - file: "VersionDeploymentResponseFileParams" - """ - The File that the deployed Version belongs to. - """ - - environment: EnvironmentResponseParams - """ - The Environment that the Version is deployed to. - """ - - type: typing.Literal["environment"] diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py deleted file mode 100644 index 74e04ab8..00000000 --- a/src/humanloop/requests/version_deployment_response_file.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -from .dataset_response import DatasetResponseParams - -if typing.TYPE_CHECKING: - from .agent_response import AgentResponseParams - from .evaluator_response import EvaluatorResponseParams - from .flow_response import FlowResponseParams - from .prompt_response import PromptResponseParams - from .tool_response import ToolResponseParams -VersionDeploymentResponseFileParams = typing.Union[ - "PromptResponseParams", - "ToolResponseParams", - DatasetResponseParams, - "EvaluatorResponseParams", - "FlowResponseParams", - "AgentResponseParams", -] diff --git a/src/humanloop/requests/version_id.py b/src/humanloop/requests/version_id.py deleted file mode 100644 index 102b6b10..00000000 --- a/src/humanloop/requests/version_id.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class VersionIdParams(typing_extensions.TypedDict): - version_id: str - """ - Unique identifier for the Version. - """ diff --git a/src/humanloop/requests/version_id_response.py b/src/humanloop/requests/version_id_response.py deleted file mode 100644 index af4d3226..00000000 --- a/src/humanloop/requests/version_id_response.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import typing_extensions - -if typing.TYPE_CHECKING: - from .version_id_response_version import VersionIdResponseVersionParams - - -class VersionIdResponseParams(typing_extensions.TypedDict): - """ - A reference to a specific Version by its ID - """ - - version: "VersionIdResponseVersionParams" - """ - The specific Version being referenced. - """ - - type: typing.Literal["version"] diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py deleted file mode 100644 index ac1f96e2..00000000 --- a/src/humanloop/requests/version_id_response_version.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -from .dataset_response import DatasetResponseParams - -if typing.TYPE_CHECKING: - from .agent_response import AgentResponseParams - from .evaluator_response import EvaluatorResponseParams - from .flow_response import FlowResponseParams - from .prompt_response import PromptResponseParams - from .tool_response import ToolResponseParams -VersionIdResponseVersionParams = typing.Union[ - "PromptResponseParams", - "ToolResponseParams", - DatasetResponseParams, - "EvaluatorResponseParams", - "FlowResponseParams", - "AgentResponseParams", -] diff --git a/src/humanloop/requests/version_reference_response.py b/src/humanloop/requests/version_reference_response.py deleted file mode 100644 index 4b80e4cd..00000000 --- a/src/humanloop/requests/version_reference_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
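# The two reference shapes above are discriminated by their literal "type" field,
# so callers can branch without isinstance checks. A sketch against a hypothetical
# payload dict (field access is illustrative):
def describe_reference(ref: dict) -> str:
    if ref["type"] == "environment":  # VersionDeploymentResponse shape
        return "deployed to environment " + ref["environment"]["name"]
    if ref["type"] == "version":  # VersionIdResponse shape
        return "pinned to a specific Version ID"
    raise ValueError("unknown reference type: " + str(ref["type"]))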
- -from __future__ import annotations - -import typing - -if typing.TYPE_CHECKING: - from .version_deployment_response import VersionDeploymentResponseParams - from .version_id_response import VersionIdResponseParams -VersionReferenceResponseParams = typing.Union["VersionDeploymentResponseParams", "VersionIdResponseParams"] diff --git a/src/humanloop/requests/version_stats_response.py b/src/humanloop/requests/version_stats_response.py deleted file mode 100644 index 1bb18233..00000000 --- a/src/humanloop/requests/version_stats_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams - - -class VersionStatsResponseParams(typing_extensions.TypedDict): - version_id: str - """ - Unique identifier for the evaluated Version. - """ - - batch_id: typing_extensions.NotRequired[str] - """ - Unique identifier for the batch of Logs to include in the Evaluation. - """ - - num_logs: int - """ - The total number of existing Logs in this Run. - """ - - evaluator_version_stats: typing.Sequence[VersionStatsResponseEvaluatorVersionStatsItemParams] - """ - Stats for each Evaluator Version applied to this Run. - """ diff --git a/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py b/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py deleted file mode 100644 index 2bbeb15c..00000000 --- a/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams -from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams -from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams -from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams - -VersionStatsResponseEvaluatorVersionStatsItemParams = typing.Union[ - NumericEvaluatorStatsResponseParams, - BooleanEvaluatorStatsResponseParams, - SelectEvaluatorStatsResponseParams, - TextEvaluatorStatsResponseParams, -] diff --git a/src/humanloop/tools/__init__.py b/src/humanloop/tools/__init__.py deleted file mode 100644 index 5cde0202..00000000 --- a/src/humanloop/tools/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -# isort: skip_file - diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py deleted file mode 100644 index d8449a7c..00000000 --- a/src/humanloop/tools/client.py +++ /dev/null @@ -1,2101 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import datetime as dt -import typing - -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.pagination import AsyncPager, SyncPager -from ..core.request_options import RequestOptions -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams -from ..requests.tool_function import ToolFunctionParams -from ..requests.tool_kernel_request import ToolKernelRequestParams -from ..types.create_tool_log_response import CreateToolLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_environment_variable_request import FileEnvironmentVariableRequest -from ..types.file_sort_by import FileSortBy -from ..types.files_tool_type import FilesToolType -from ..types.list_tools import ListTools -from ..types.log_response import LogResponse -from ..types.log_status import LogStatus -from ..types.sort_order import SortOrder -from ..types.tool_call_response import ToolCallResponse -from ..types.tool_response import ToolResponse -from .raw_client import AsyncRawToolsClient, RawToolsClient - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class ToolsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._raw_client = RawToolsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> RawToolsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - RawToolsClient - """ - return self._raw_client - - def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolCallResponse: - """ - Call a Tool. - - Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. 
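For illustration, a minimal sketch of the three targeting modes this describes; the Tool and Version IDs and the Environment name are the placeholder values used elsewhere in these docstrings, and only the generated `tools.call` signature above is assumed:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # 1. Target whichever version is deployed to a named Environment.
    client.tools.call(id="tl_789ghi", environment="staging", inputs={"a": 5, "b": 7})

    # 2. Target an explicit version by its ID.
    client.tools.call(id="tl_789ghi", version_id="tv_012jkl", inputs={"a": 5, "b": 7})

    # 3. Pass the Tool details inline; a new version is created if they are new.
    client.tools.call(
        path="math-tool",
        tool={"function": {"name": "multiply", "description": "Multiply two numbers",
                           "parameters": {"type": "object",
                                          "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
                                          "required": ["a", "b"]}}},
        inputs={"a": 5, "b": 7},
    )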
- - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to call. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to call. - - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Tool. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - tool_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - ToolCallResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.call() - """ - _response = self._raw_client.call( - version_id=version_id, - environment=environment, - path=path, - id=id, - tool=tool, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - tool_call_request_environment=tool_call_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateToolLogResponse: - """ - Log to a Tool. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Tool. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - tool_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
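A sketch of the incomplete-then-complete flow implied by `log_status` above; the response field names (`tool_id`, `id`) are assumptions about `CreateToolLogResponse`, not confirmed by this file:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Open the Log as incomplete so monitoring Evaluators wait for the final payload.
    created = client.tools.log(path="math-tool", inputs={"a": 5, "b": 7}, log_status="incomplete")

    # ... run the tool and gather its output ...

    # Mark the Log complete so observability and Evaluators can pick it up.
    client.tools.update(
        id=created.tool_id,  # assumed field: ID of the Tool the Log belongs to
        log_id=created.id,   # assumed field: ID of the created Log
        output="35",
        log_status="complete",
    )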
- - Returns - ------- - CreateToolLogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.log(path='math-tool', tool={'function': {'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object' - , 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}} - , 'required': ['a', 'b'] - }}}, inputs={'a': 5 - , 'b': 7 - }, output='35', ) - """ - _response = self._raw_client.log( - version_id=version_id, - environment=environment, - path=path, - id=id, - tool=tool, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - tool_log_request_environment=tool_log_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - def update( - self, - id: str, - log_id: str, - *, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> LogResponse: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - log_id : str - Unique identifier for the Log. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Tool. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log.
Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LogResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.update(id='id', log_id='log_id', ) - """ - _response = self._raw_client.update( - id, - log_id, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[ToolResponse]: - """ - Get a list of all Tools. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Tools to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Tool name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Tools by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SyncPager[ToolResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - response = client.tools.list(size=1, ) - for item in response: - print(item) - # alternatively, you can paginate page-by-page - for page in response.iter_pages(): - print(page) - """ - return self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - def upsert( - self, - *, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - function: typing.Optional[ToolFunctionParams] = OMIT, - source_code: typing.Optional[str] = OMIT, - setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tool_type: typing.Optional[FilesToolType] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Create a Tool or update it with a new version if it already exists. - - Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. - - You can provide `version_name` and `version_description` to identify and describe your versions.
- Version names must be unique within a Tool - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - function : typing.Optional[ToolFunctionParams] - Callable function specification of the Tool shown to the model for tool calling. - - source_code : typing.Optional[str] - Source code of the Tool. - - setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/ - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - - tool_type : typing.Optional[FilesToolType] - Type of Tool. - - version_name : typing.Optional[str] - Unique identifier for this Tool version. Each Tool can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.upsert(path='math-tool', function={'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object' - , 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}} - , 'required': ['a', 'b'] - }}, version_name='math-tool-v1', version_description='Simple math tool that multiplies two numbers', ) - """ - _response = self._raw_client.upsert( - path=path, - id=id, - function=function, - source_code=source_code, - setup_values=setup_values, - attributes=attributes, - tool_type=tool_type, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Retrieve the Tool with the given ID. - - By default, the deployed version of the Tool is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : typing.Optional[str] - A specific Version ID of the Tool to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.get(id='tl_789ghi', ) - """ - _response = self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Tool with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool.
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.delete(id='tl_789ghi', ) - """ - _response = self._raw_client.delete(id, request_options=request_options) - return _response.data - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Move the Tool to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - path : typing.Optional[str] - Path of the Tool including the Tool name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Tool, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.move(id='tl_789ghi', path='new directory/new name', ) - """ - _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListTools: - """ - Get a list of all the versions of a Tool. - - Parameters - ---------- - id : str - Unique identifier for the Tool. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListTools - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.list_versions(id='tl_789ghi', ) - """ - _response = self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - def delete_tool_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.delete_tool_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.delete_tool_version(id, version_id, request_options=request_options) - return _response.data - - def update_tool_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Update the name or description of the Tool version. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.update_tool_version(id='id', version_id='version_id', ) - """ - _response = self._raw_client.update_tool_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponse: - """ - Deploy Tool to an Environment. - - Set the deployed version for the specified Environment. This Tool - will be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', ) - """ - _response = self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Tool from the Environment. - - Remove the deployed version for the specified Environment. This Tool - will no longer be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', ) - """ - _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
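Taken together, the deployment endpoints support a simple promote-inspect-rollback loop; a sketch reusing the placeholder IDs from the examples above:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Pin a specific Tool version to the staging Environment.
    client.tools.set_deployment(id="tl_789ghi", environment_id="staging", version_id="tv_012jkl")

    # Inspect which version is deployed to each Environment.
    for env in client.tools.list_environments(id="tl_789ghi"):
        print(env)

    # Remove the deployment from the Environment again.
    client.tools.remove_deployment(id="tl_789ghi", environment_id="staging")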
- - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.list_environments(id='tl_789ghi', ) - """ - _response = self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Activate and deactivate Evaluators for monitoring the Tool. - - An activated Evaluator will automatically be run on all new Logs - within the Tool for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) - """ - _response = self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - def get_environment_variables( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentVariableRequest]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentVariableRequest] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.get_environment_variables(id='id', ) - """ - _response = self._raw_client.get_environment_variables(id, request_options=request_options) - return _response.data - - def add_environment_variable( - self, - id: str, - *, - request: typing.Sequence[FileEnvironmentVariableRequestParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.List[FileEnvironmentVariableRequest]: - """ - Add an environment variable to a Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request : typing.Sequence[FileEnvironmentVariableRequestParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
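A sketch of the environment-variable round trip using the three endpoints above; the variable name and value are placeholders:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Attach a variable, list what is currently set, then remove it.
    client.tools.add_environment_variable(id="tl_789ghi", request=[{"name": "MY_VAR", "value": "my-value"}])
    print(client.tools.get_environment_variables(id="tl_789ghi"))
    client.tools.delete_environment_variable(id="tl_789ghi", name="MY_VAR")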
- - Returns - ------- - typing.List[FileEnvironmentVariableRequest] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], ) - """ - _response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options) - return _response.data - - def delete_environment_variable( - self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentVariableRequest]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - name : str - Name of the Environment Variable to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentVariableRequest] - Successful Response - - Examples - -------- - from humanloop import Humanloop - client = Humanloop(api_key="YOUR_API_KEY", ) - client.tools.delete_environment_variable(id='id', name='name', ) - """ - _response = self._raw_client.delete_environment_variable(id, name, request_options=request_options) - return _response.data - - -class AsyncToolsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._raw_client = AsyncRawToolsClient(client_wrapper=client_wrapper) - - @property - def with_raw_response(self) -> AsyncRawToolsClient: - """ - Retrieves a raw implementation of this client that returns raw responses. - - Returns - ------- - AsyncRawToolsClient - """ - return self._raw_client - - async def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolCallResponse: - """ - Call a Tool. - - Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to call. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to call. - - path : typing.Optional[str] - Path of the Tool, including the name. 
This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Tool. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - tool_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - ToolCallResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.call() - asyncio.run(main()) - """ - _response = await self._raw_client.call( - version_id=version_id, - environment=environment, - path=path, - id=id, - tool=tool, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - tool_call_request_environment=tool_call_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - async def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateToolLogResponse: - """ - Log to a Tool. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs.
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Tool. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - tool_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
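Because the async client awaits each request, several Logs can be recorded concurrently; a sketch with `asyncio.gather`, assuming only the `tools.log` signature above:

    import asyncio

    from humanloop import AsyncHumanloop

    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    async def main() -> None:
        # Fire several Log requests at once instead of one at a time.
        await asyncio.gather(
            *[client.tools.log(path="math-tool", inputs={"a": a, "b": 7}, output=str(a * 7)) for a in range(3)]
        )

    asyncio.run(main())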
- - Returns - ------- - CreateToolLogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.log(path='math-tool', tool={'function': {'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object' - , 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}} - , 'required': ['a', 'b'] - }}}, inputs={'a': 5 - , 'b': 7 - }, output='35', ) - asyncio.run(main()) - """ - _response = await self._raw_client.log( - version_id=version_id, - environment=environment, - path=path, - id=id, - tool=tool, - start_time=start_time, - end_time=end_time, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - log_status=log_status, - source_datapoint_id=source_datapoint_id, - trace_parent_id=trace_parent_id, - user=user, - tool_log_request_environment=tool_log_request_environment, - save=save, - log_id=log_id, - request_options=request_options, - ) - return _response.data - - async def update( - self, - id: str, - log_id: str, - *, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> LogResponse: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - log_id : str - Unique identifier for the Log. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the Tool. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started.
- - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LogResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.update(id='id', log_id='log_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update( - id, - log_id, - output=output, - created_at=created_at, - error=error, - provider_latency=provider_latency, - stdout=stdout, - provider_request=provider_request, - provider_response=provider_response, - inputs=inputs, - source=source, - metadata=metadata, - start_time=start_time, - end_time=end_time, - log_status=log_status, - request_options=request_options, - ) - return _response.data - - async def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncPager[ToolResponse]: - """ - Get a list of all Tools. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Tools to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Tool name. - - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Tools by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - AsyncPager[ToolResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - response = await client.tools.list(size=1, ) - async for item in response: - print(item) - - # alternatively, you can paginate page-by-page - async for page in response.iter_pages(): - print(page) - asyncio.run(main()) - """ - return await self._raw_client.list( - page=page, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - - async def upsert( - self, - *, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - function: typing.Optional[ToolFunctionParams] = OMIT, - source_code: typing.Optional[str] = OMIT, - setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tool_type: typing.Optional[FilesToolType] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Create a Tool or update it with a new version if it already exists. - - Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Tool - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - function : typing.Optional[ToolFunctionParams] - Callable function specification of the Tool shown to the model for tool calling. - - source_code : typing.Optional[str] - Source code of the Tool. - - setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/ - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - - tool_type : typing.Optional[FilesToolType] - Type of Tool. - - version_name : typing.Optional[str] - Unique identifier for this Tool version. Each Tool can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
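Since version names must be unique within a Tool, reusing one yields a 409 Conflict; a sketch of handling it, assuming such errors surface as the SDK's `ApiError` (from `humanloop.core.api_error`) with a `status_code` attribute:

    import asyncio

    from humanloop import AsyncHumanloop
    from humanloop.core.api_error import ApiError

    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    async def main() -> None:
        try:
            await client.tools.upsert(path="math-tool", version_name="math-tool-v1")
        except ApiError as err:  # assumed error surface for a duplicate version_name
            if err.status_code == 409:
                print("version_name already exists for this Tool")
            else:
                raise

    asyncio.run(main())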
- - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.upsert(path='math-tool', function={'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object' - , 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}} - , 'required': ['a', 'b'] - }}, version_name='math-tool-v1', version_description='Simple math tool that multiplies two numbers', ) - asyncio.run(main()) - """ - _response = await self._raw_client.upsert( - path=path, - id=id, - function=function, - source_code=source_code, - setup_values=setup_values, - attributes=attributes, - tool_type=tool_type, - version_name=version_name, - version_description=version_description, - request_options=request_options, - ) - return _response.data - - async def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Retrieve the Tool with the given ID. - - By default, the deployed version of the Tool is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : typing.Optional[str] - A specific Version ID of the Tool to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.get(id='tl_789ghi', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get( - id, version_id=version_id, environment=environment, request_options=request_options - ) - return _response.data - - async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: - """ - Delete the Tool with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.delete(id='tl_789ghi', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete(id, request_options=request_options) - return _response.data - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Move the Tool to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - path : typing.Optional[str] - Path of the Tool including the Tool name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Tool, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.move(id='tl_789ghi', path='new directory/new name', ) - asyncio.run(main()) - """ - _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) - return _response.data - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ListTools: - """ - Get a list of all the versions of a Tool. - - Parameters - ---------- - id : str - Unique identifier for the Tool. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ListTools - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.list_versions(id='tl_789ghi', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_versions( - id, evaluator_aggregates=evaluator_aggregates, request_options=request_options - ) - return _response.data - - async def delete_tool_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Delete a version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.delete_tool_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete_tool_version(id, version_id, request_options=request_options) - return _response.data - - async def update_tool_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Update the name or description of the Tool version. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.update_tool_version(id='id', version_id='version_id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_tool_version( - id, version_id, name=name, description=description, request_options=request_options - ) - return _response.data - - async def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponse: - """ - Deploy Tool to an Environment. - - Set the deployed version for the specified Environment. This version - will be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', ) - asyncio.run(main()) - """ - _response = await self._raw_client.set_deployment( - id, environment_id, version_id=version_id, request_options=request_options - ) - return _response.data - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: - """ - Remove deployed Tool from the Environment. - - Remove the deployed version for the specified Environment. This Tool - will no longer be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - None - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', ) - asyncio.run(main()) - """ - _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) - return _response.data - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentResponse]: - """ - List all Environments and their deployed versions for the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
- - Returns - ------- - typing.List[FileEnvironmentResponse] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.list_environments(id='tl_789ghi', ) - asyncio.run(main()) - """ - _response = await self._raw_client.list_environments(id, request_options=request_options) - return _response.data - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> ToolResponse: - """ - Activate and deactivate Evaluators for monitoring the Tool. - - An activated Evaluator will automatically be run on all new Logs - within the Tool for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ToolResponse - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.update_monitoring( - id, activate=activate, deactivate=deactivate, request_options=request_options - ) - return _response.data - - async def get_environment_variables( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentVariableRequest]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentVariableRequest] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.get_environment_variables(id='id', ) - asyncio.run(main()) - """ - _response = await self._raw_client.get_environment_variables(id, request_options=request_options) - return _response.data - - async def add_environment_variable( - self, - id: str, - *, - request: typing.Sequence[FileEnvironmentVariableRequestParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.List[FileEnvironmentVariableRequest]: - """ - Add an environment variable to a Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request : typing.Sequence[FileEnvironmentVariableRequestParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.List[FileEnvironmentVariableRequest] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], ) - asyncio.run(main()) - """ - _response = await self._raw_client.add_environment_variable( - id, request=request, request_options=request_options - ) - return _response.data - - async def delete_environment_variable( - self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[FileEnvironmentVariableRequest]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - name : str - Name of the Environment Variable to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.List[FileEnvironmentVariableRequest] - Successful Response - - Examples - -------- - from humanloop import AsyncHumanloop - import asyncio - client = AsyncHumanloop(api_key="YOUR_API_KEY", ) - async def main() -> None: - await client.tools.delete_environment_variable(id='id', name='name', ) - asyncio.run(main()) - """ - _response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options) - return _response.data diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py deleted file mode 100644 index 85bbef9e..00000000 --- a/src/humanloop/tools/raw_client.py +++ /dev/null @@ -1,2917 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing -from json.decoder import JSONDecodeError - -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper -from ..core.http_response import AsyncHttpResponse, HttpResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager -from ..core.request_options import RequestOptions -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..requests.evaluator_activation_deactivation_request_activate_item import ( - EvaluatorActivationDeactivationRequestActivateItemParams, -) -from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItemParams, -) -from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams -from ..requests.tool_function import ToolFunctionParams -from ..requests.tool_kernel_request import ToolKernelRequestParams -from ..types.create_tool_log_response import CreateToolLogResponse -from ..types.file_environment_response import FileEnvironmentResponse -from ..types.file_environment_variable_request import FileEnvironmentVariableRequest -from ..types.file_sort_by import FileSortBy -from ..types.files_tool_type import FilesToolType -from ..types.http_validation_error import HttpValidationError -from ..types.list_tools import ListTools -from ..types.log_response import LogResponse -from ..types.log_status import LogStatus -from ..types.paginated_data_tool_response import PaginatedDataToolResponse -from ..types.sort_order import SortOrder -from ..types.tool_call_response import ToolCallResponse -from 
..types.tool_response import ToolResponse - - # this is used as the default value for optional parameters - OMIT = typing.cast(typing.Any, ...) - - - class RawToolsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_call_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ToolCallResponse]: - """ - Call a Tool. - - Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to call. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to call. - - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations.
If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - tool_call_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to. - - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ToolCallResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "tools/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": tool_call_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolCallResponse, - construct_type( - type_=ToolCallResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def log( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_log_request_environment: typing.Optional[str] = OMIT, - save: 
typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[CreateToolLogResponse]: - """ - Log to a Tool. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise, the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool. If they do not, we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. - - Parameters - ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created. - - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - source_datapoint_id : typing.Optional[str] - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - - trace_parent_id : typing.Optional[str] - The ID of the parent Log to nest this Log under in a Trace. - - user : typing.Optional[str] - End-user ID related to the Log. - - tool_log_request_environment : typing.Optional[str] - The name of the Environment the Log is associated to.
- - save : typing.Optional[bool] - Whether the request/response payloads will be stored on Humanloop. - - log_id : typing.Optional[str] - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[CreateToolLogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "tools/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": tool_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - CreateToolLogResponse, - construct_type( - type_=CreateToolLogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update( - self, - id: str, - log_id: str, - *, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[LogResponse]: - """ - Update a Log. - - Update the details of a Log with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - log_id : str - Unique identifier for the Log. - - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - - created_at : typing.Optional[dt.datetime] - User defined timestamp for when the log was created.
- - error : typing.Optional[str] - Error message if the log is an error. - - provider_latency : typing.Optional[float] - Duration of the logged event in seconds. - - stdout : typing.Optional[str] - Captured log and debug statements. - - provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw request sent to the provider. - - provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Raw response received from the provider. - - inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - The inputs passed to the prompt template. - - source : typing.Optional[str] - Identifies where the model was called from. - - metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Any additional metadata to record. - - start_time : typing.Optional[dt.datetime] - When the logged event started. - - end_time : typing.Optional[dt.datetime] - When the logged event ended. - - log_status : typing.Optional[LogStatus] - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[LogResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list( - self, - *, - page: typing.Optional[int] = None, - size: typing.Optional[int] = None, - name: typing.Optional[str] = None, - user_filter: typing.Optional[str] = None, - sort_by: typing.Optional[FileSortBy] = None, - order: typing.Optional[SortOrder] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> SyncPager[ToolResponse]: - """ - Get a list of all Tools. - - Parameters - ---------- - page : typing.Optional[int] - Page offset for pagination. - - size : typing.Optional[int] - Page size for pagination. Number of Tools to fetch. - - name : typing.Optional[str] - Case-insensitive filter for Tool name.
- - user_filter : typing.Optional[str] - Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users. - - sort_by : typing.Optional[FileSortBy] - Field to sort Tools by. - - order : typing.Optional[SortOrder] - Direction to sort by. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SyncPager[ToolResponse] - Successful Response - """ - page = page if page is not None else 1 - - _response = self._client_wrapper.httpx_client.request( - "tools", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "user_filter": user_filter, - "sort_by": sort_by, - "order": order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - PaginatedDataToolResponse, - construct_type( - type_=PaginatedDataToolResponse, # type: ignore - object_=_response.json(), - ), - ) - _items = _parsed_response.records - _has_next = True - _get_next = lambda: self.list( - page=page + 1, - size=size, - name=name, - user_filter=user_filter, - sort_by=sort_by, - order=order, - request_options=request_options, - ) - return SyncPager( - has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def upsert( - self, - *, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - function: typing.Optional[ToolFunctionParams] = OMIT, - source_code: typing.Optional[str] = OMIT, - setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tool_type: typing.Optional[FilesToolType] = OMIT, - version_name: typing.Optional[str] = OMIT, - version_description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ToolResponse]: - """ - Create a Tool or update it with a new version if it already exists. - - Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. - - You can provide `version_name` and `version_description` to identify and describe your versions. - Version names must be unique within a Tool - attempting to create a version with a name - that already exists will result in a 409 Conflict error. - - Parameters - ---------- - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - function : typing.Optional[ToolFunctionParams] - Callable function specification of the Tool shown to the model for tool calling. - - source_code : typing.Optional[str] - Code source of the Tool.
- - setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/ - - attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - - tool_type : typing.Optional[FilesToolType] - Type of Tool. - - version_name : typing.Optional[str] - Unique identifier for this Tool version. Each Tool can only have one version with a given name. - - version_description : typing.Optional[str] - Description of the Version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ToolResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - "tools", - method="POST", - json={ - "path": path, - "id": id, - "function": convert_and_respect_annotation_metadata( - object_=function, annotation=ToolFunctionParams, direction="write" - ), - "source_code": source_code, - "setup_values": setup_values, - "attributes": attributes, - "tool_type": tool_type, - "version_name": version_name, - "version_description": version_description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get( - self, - id: str, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ToolResponse]: - """ - Retrieve the Tool with the given ID. - - By default, the deployed version of the Tool is returned. Use the query parameters - `version_id` or `environment` to target a specific version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : typing.Optional[str] - A specific Version ID of the Tool to retrieve. - - environment : typing.Optional[str] - Name of the Environment to retrieve a deployed Version from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
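- - Examples - -------- - A minimal sketch of raw-client usage (illustrative, not generated code; assumes a configured RawToolsClient, such as the _raw_client held by ToolsClient, is available here as raw): - http_response = raw.get('tl_789ghi', environment='production') - tool = http_response.data  # parsed ToolResponse; the underlying httpx response remains available on http_response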
- - Returns - ------- - HttpResponse[ToolResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: - """ - Delete the Tool with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ToolResponse]: - """ - Move the Tool to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - path : typing.Optional[str] - Path of the Tool including the Tool name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Tool, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
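- - Examples - -------- - A minimal sketch (illustrative, not generated code; raw is the RawToolsClient assumed above): - http_response = raw.move('tl_789ghi', path='new directory/new name') - tool = http_response.data  # ToolResponse reflecting the new path (assumes ToolResponse exposes the updated fields)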
- - Returns - ------- - HttpResponse[ToolResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ListTools]: - """ - Get a list of all the versions of a Tool. - - Parameters - ---------- - id : str - Unique identifier for the Tool. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ListTools] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListTools, - construct_type( - type_=ListTools, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_tool_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Delete a version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
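- - Examples - -------- - A minimal sketch (illustrative, not generated code; raw is the RawToolsClient assumed above). A successful delete carries no body, so data is None: - http_response = raw.delete_tool_version('tl_789ghi', 'tv_012jkl') - assert http_response.data is None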
- - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_tool_version( - self, - id: str, - version_id: str, - *, - name: typing.Optional[str] = OMIT, - description: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ToolResponse]: - """ - Update the name or description of the Tool version. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - name : typing.Optional[str] - Name of the version. - - description : typing.Optional[str] - Description of the version. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[ToolResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="PATCH", - json={ - "name": name, - "description": description, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def set_deployment( - self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[ToolResponse]: - """ - Deploy Tool to an Environment. - - Set the deployed version for the specified Environment. This version - will be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to deploy the Version to. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
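- - Examples - -------- - A minimal sketch (illustrative, not generated code; raw is the RawToolsClient assumed above): - http_response = raw.set_deployment('tl_789ghi', 'staging', version_id='tv_012jkl') - tool = http_response.data  # ToolResponse for the version now deployed to staging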
- - Returns - ------- - HttpResponse[ToolResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[None]: - """ - Remove deployed Tool from the Environment. - - Remove the deployed version for the specified Environment. This Tool - will no longer be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[None] - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
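- - Examples - -------- - A minimal sketch (illustrative, not generated code; raw is the RawToolsClient assumed above): - http_response = raw.list_environments('tl_789ghi') - for env in http_response.data:  # typing.List[FileEnvironmentResponse] - print(env)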
- - Returns - ------- - HttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[ToolResponse]: - """ - Activate and deactivate Evaluators for monitoring the Tool. - - An activated Evaluator will automatically be run on all new Logs - within the Tool for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
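- - Examples - -------- - A minimal sketch (illustrative, not generated code; raw is the RawToolsClient assumed above; the activate payload mirrors the wrapper example earlier in this file): - http_response = raw.update_monitoring('tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}]) - tool = http_response.data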
- - Returns - ------- - HttpResponse[ToolResponse] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def get_environment_variables( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[typing.List[FileEnvironmentVariableRequest]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environment-variables", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentVariableRequest], - construct_type( - type_=typing.List[FileEnvironmentVariableRequest], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def add_environment_variable( - self, - id: str, - *, - request: typing.Sequence[FileEnvironmentVariableRequestParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: - """ - Add an environment variable to a Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request : typing.Sequence[FileEnvironmentVariableRequestParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
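- - Examples - -------- - A minimal sketch (illustrative, not generated code; raw is the RawToolsClient assumed above; the name/value pair is hypothetical): - http_response = raw.add_environment_variable('tl_789ghi', request=[{'name': 'name', 'value': 'value'}]) - env_vars = http_response.data  # the Tool's full list of FileEnvironmentVariableRequest after the addition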
- - Returns - ------- - HttpResponse[typing.List[FileEnvironmentVariableRequest]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environment-variables", - method="POST", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" - ), - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentVariableRequest], - construct_type( - type_=typing.List[FileEnvironmentVariableRequest], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - def delete_environment_variable( - self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - name : str - Name of the Environment Variable to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - HttpResponse[typing.List[FileEnvironmentVariableRequest]] - Successful Response - """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentVariableRequest], - construct_type( - type_=typing.List[FileEnvironmentVariableRequest], # type: ignore - object_=_response.json(), - ), - ) - return HttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - -class AsyncRawToolsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def call( - self, - *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - 
log_status: typing.Optional[LogStatus] = OMIT,
-        source_datapoint_id: typing.Optional[str] = OMIT,
-        trace_parent_id: typing.Optional[str] = OMIT,
-        user: typing.Optional[str] = OMIT,
-        tool_call_request_environment: typing.Optional[str] = OMIT,
-        save: typing.Optional[bool] = OMIT,
-        log_id: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncHttpResponse[ToolCallResponse]:
-        """
-        Call a Tool.
-
-        Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
-
-        You can use the query parameters `version_id` or `environment` to target
-        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
-
-        Instead of targeting an existing version explicitly, you can pass in
-        Tool details in the request body. In this case, we will check if the details correspond
-        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
-        in the case where you are storing or deriving your Tool details in code.
-
-        Parameters
-        ----------
-        version_id : typing.Optional[str]
-            A specific Version ID of the Tool to call.
-
-        environment : typing.Optional[str]
-            Name of the Environment identifying a deployed version to call.
-
-        path : typing.Optional[str]
-            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Tool.
-
-        tool : typing.Optional[ToolKernelRequestParams]
-            Details of your Tool. A new Tool version will be created if the provided details are new.
-
-        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            The inputs passed to the prompt template.
-
-        source : typing.Optional[str]
-            Identifies where the model was called from.
-
-        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Any additional metadata to record.
-
-        start_time : typing.Optional[dt.datetime]
-            When the logged event started.
-
-        end_time : typing.Optional[dt.datetime]
-            When the logged event ended.
-
-        log_status : typing.Optional[LogStatus]
-            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-        source_datapoint_id : typing.Optional[str]
-            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-        trace_parent_id : typing.Optional[str]
-            The ID of the parent Log to nest this Log under in a Trace.
-
-        user : typing.Optional[str]
-            End-user ID related to the Log.
-
-        tool_call_request_environment : typing.Optional[str]
-            The name of the Environment the Log is associated with.
-
-        save : typing.Optional[bool]
-            Whether the request/response payloads will be stored on Humanloop.
-
-        log_id : typing.Optional[str]
-            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncHttpResponse[ToolCallResponse]
-            Successful Response
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "tools/call",
-            method="POST",
-            params={
-                "version_id": version_id,
-                "environment": environment,
-            },
-            json={
-                "path": path,
-                "id": id,
-                "tool": convert_and_respect_annotation_metadata(
-                    object_=tool, annotation=ToolKernelRequestParams, direction="write"
-                ),
-                "inputs": inputs,
-                "source": source,
-                "metadata": metadata,
-                "start_time": start_time,
-                "end_time": end_time,
-                "log_status": log_status,
-                "source_datapoint_id": source_datapoint_id,
-                "trace_parent_id": trace_parent_id,
-                "user": user,
-                "environment": tool_call_request_environment,
-                "save": save,
-                "log_id": log_id,
-            },
-            headers={
-                "content-type": "application/json",
-            },
-            request_options=request_options,
-            omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                _data = typing.cast(
-                    ToolCallResponse,
-                    construct_type(
-                        type_=ToolCallResponse,  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-                return AsyncHttpResponse(response=_response, data=_data)
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def log(
-        self,
-        *,
-        version_id: typing.Optional[str] = None,
-        environment: typing.Optional[str] = None,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
-        start_time: typing.Optional[dt.datetime] = OMIT,
-        end_time: typing.Optional[dt.datetime] = OMIT,
-        output: typing.Optional[str] = OMIT,
-        created_at: typing.Optional[dt.datetime] = OMIT,
-        error: typing.Optional[str] = OMIT,
-        provider_latency: typing.Optional[float] = OMIT,
-        stdout: typing.Optional[str] = OMIT,
-        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        source: typing.Optional[str] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        log_status: typing.Optional[LogStatus] = OMIT,
-        source_datapoint_id: typing.Optional[str] = OMIT,
-        trace_parent_id: typing.Optional[str] = OMIT,
-        user: typing.Optional[str] = OMIT,
-        tool_log_request_environment: typing.Optional[str] = OMIT,
-        save: typing.Optional[bool] = OMIT,
-        log_id: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncHttpResponse[CreateToolLogResponse]:
-        """
-        Log to a Tool.
-
-        You can use the query parameters `version_id` or `environment` to target
-        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
-
-        Instead of targeting an existing version explicitly, you can pass in
-        Tool details in the request body. In this case, we will check if the details correspond
-        to an existing version of the Tool; if not, we will create a new version. This is helpful
-        in the case where you are storing or deriving your Tool details in code.
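As a usage sketch for the `tools/call` endpoint implemented above: this assumes the package's exported async client is `AsyncHumanloop` and that the generated wrapper exposes the same parameters as `client.tools.call`; the API key, the Tool path, and the `output` attribute on the response are placeholders and assumptions rather than confirmed API surface.

import asyncio

from humanloop import AsyncHumanloop  # assumed async client export

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")  # placeholder key
    # Calls the default deployed version of the Tool at this path; passing
    # inline `tool` details instead would match or create a version first.
    response = await client.tools.call(
        path="utils/calculator",  # hypothetical Tool path
        inputs={"expression": "1 + 2"},
    )
    print(response.output)  # assumes the call response carries the run's output

asyncio.run(main())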
-
-        Parameters
-        ----------
-        version_id : typing.Optional[str]
-            A specific Version ID of the Tool to log to.
-
-        environment : typing.Optional[str]
-            Name of the Environment identifying a deployed version to log to.
-
-        path : typing.Optional[str]
-            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Tool.
-
-        tool : typing.Optional[ToolKernelRequestParams]
-            Details of your Tool. A new Tool version will be created if the provided details are new.
-
-        start_time : typing.Optional[dt.datetime]
-            When the logged event started.
-
-        end_time : typing.Optional[dt.datetime]
-            When the logged event ended.
-
-        output : typing.Optional[str]
-            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
-        created_at : typing.Optional[dt.datetime]
-            User defined timestamp for when the log was created.
-
-        error : typing.Optional[str]
-            Error message if the log is an error.
-
-        provider_latency : typing.Optional[float]
-            Duration of the logged event in seconds.
-
-        stdout : typing.Optional[str]
-            Captured log and debug statements.
-
-        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw request sent to the provider.
-
-        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw response received from the provider.
-
-        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            The inputs passed to the prompt template.
-
-        source : typing.Optional[str]
-            Identifies where the model was called from.
-
-        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Any additional metadata to record.
-
-        log_status : typing.Optional[LogStatus]
-            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-        source_datapoint_id : typing.Optional[str]
-            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-        trace_parent_id : typing.Optional[str]
-            The ID of the parent Log to nest this Log under in a Trace.
-
-        user : typing.Optional[str]
-            End-user ID related to the Log.
-
-        tool_log_request_environment : typing.Optional[str]
-            The name of the Environment the Log is associated with.
-
-        save : typing.Optional[bool]
-            Whether the request/response payloads will be stored on Humanloop.
-
-        log_id : typing.Optional[str]
-            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
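A minimal sketch of recording a pre-computed result through the `tools/log` endpoint documented above, assuming the generated wrapper exposes it as `client.tools.log` on an exported `Humanloop` sync client; the path, values, and `id` attribute are placeholders and assumptions.

from humanloop import Humanloop  # assumed sync client export

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key
# Record a Tool execution that happened elsewhere; `log_status="complete"`
# tells monitoring Evaluators the Log will not be updated further.
log = client.tools.log(
    path="utils/calculator",  # hypothetical Tool path
    inputs={"expression": "1 + 2"},
    output="3",
    log_status="complete",
)
print(log.id)  # assumes the create-Log response exposes the new Log's ID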
-
-        Returns
-        -------
-        AsyncHttpResponse[CreateToolLogResponse]
-            Successful Response
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "tools/log",
-            method="POST",
-            params={
-                "version_id": version_id,
-                "environment": environment,
-            },
-            json={
-                "path": path,
-                "id": id,
-                "tool": convert_and_respect_annotation_metadata(
-                    object_=tool, annotation=ToolKernelRequestParams, direction="write"
-                ),
-                "start_time": start_time,
-                "end_time": end_time,
-                "output": output,
-                "created_at": created_at,
-                "error": error,
-                "provider_latency": provider_latency,
-                "stdout": stdout,
-                "provider_request": provider_request,
-                "provider_response": provider_response,
-                "inputs": inputs,
-                "source": source,
-                "metadata": metadata,
-                "log_status": log_status,
-                "source_datapoint_id": source_datapoint_id,
-                "trace_parent_id": trace_parent_id,
-                "user": user,
-                "environment": tool_log_request_environment,
-                "save": save,
-                "log_id": log_id,
-            },
-            headers={
-                "content-type": "application/json",
-            },
-            request_options=request_options,
-            omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                _data = typing.cast(
-                    CreateToolLogResponse,
-                    construct_type(
-                        type_=CreateToolLogResponse,  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-                return AsyncHttpResponse(response=_response, data=_data)
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def update(
-        self,
-        id: str,
-        log_id: str,
-        *,
-        output: typing.Optional[str] = OMIT,
-        created_at: typing.Optional[dt.datetime] = OMIT,
-        error: typing.Optional[str] = OMIT,
-        provider_latency: typing.Optional[float] = OMIT,
-        stdout: typing.Optional[str] = OMIT,
-        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        source: typing.Optional[str] = OMIT,
-        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        start_time: typing.Optional[dt.datetime] = OMIT,
-        end_time: typing.Optional[dt.datetime] = OMIT,
-        log_status: typing.Optional[LogStatus] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncHttpResponse[LogResponse]:
-        """
-        Update a Log.
-
-        Update the details of a Log with the given ID.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Tool.
-
-        log_id : str
-            Unique identifier for the Log.
-
-        output : typing.Optional[str]
-            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
-        created_at : typing.Optional[dt.datetime]
-            User defined timestamp for when the log was created.
-
-        error : typing.Optional[str]
-            Error message if the log is an error.
-
-        provider_latency : typing.Optional[float]
-            Duration of the logged event in seconds.
-
-        stdout : typing.Optional[str]
-            Captured log and debug statements.
-
-        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw request sent to the provider.
-
-        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Raw response received from the provider.
-
-        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            The inputs passed to the prompt template.
-
-        source : typing.Optional[str]
-            Identifies where the model was called from.
-
-        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Any additional metadata to record.
-
-        start_time : typing.Optional[dt.datetime]
-            When the logged event started.
-
-        end_time : typing.Optional[dt.datetime]
-            When the logged event ended.
-
-        log_status : typing.Optional[LogStatus]
-            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncHttpResponse[LogResponse]
-            Successful Response
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
-            method="PATCH",
-            json={
-                "output": output,
-                "created_at": created_at,
-                "error": error,
-                "provider_latency": provider_latency,
-                "stdout": stdout,
-                "provider_request": provider_request,
-                "provider_response": provider_response,
-                "inputs": inputs,
-                "source": source,
-                "metadata": metadata,
-                "start_time": start_time,
-                "end_time": end_time,
-                "log_status": log_status,
-            },
-            headers={
-                "content-type": "application/json",
-            },
-            request_options=request_options,
-            omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                _data = typing.cast(
-                    LogResponse,
-                    construct_type(
-                        type_=LogResponse,  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-                return AsyncHttpResponse(response=_response, data=_data)
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def list(
-        self,
-        *,
-        page: typing.Optional[int] = None,
-        size: typing.Optional[int] = None,
-        name: typing.Optional[str] = None,
-        user_filter: typing.Optional[str] = None,
-        sort_by: typing.Optional[FileSortBy] = None,
-        order: typing.Optional[SortOrder] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncPager[ToolResponse]:
-        """
-        Get a list of all Tools.
-
-        Parameters
-        ----------
-        page : typing.Optional[int]
-            Page offset for pagination.
-
-        size : typing.Optional[int]
-            Page size for pagination. Number of Tools to fetch.
-
-        name : typing.Optional[str]
-            Case-insensitive filter for Tool name.
-
-        user_filter : typing.Optional[str]
-            Case-insensitive filter for users in the Tool. This filter matches against both the email address and the name of users.
-
-        sort_by : typing.Optional[FileSortBy]
-            Field to sort Tools by.
-
-        order : typing.Optional[SortOrder]
-            Direction to sort by.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncPager[ToolResponse]
-            Successful Response
-        """
-        page = page if page is not None else 1
-
-        _response = await self._client_wrapper.httpx_client.request(
-            "tools",
-            method="GET",
-            params={
-                "page": page,
-                "size": size,
-                "name": name,
-                "user_filter": user_filter,
-                "sort_by": sort_by,
-                "order": order,
-            },
-            request_options=request_options,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                _parsed_response = typing.cast(
-                    PaginatedDataToolResponse,
-                    construct_type(
-                        type_=PaginatedDataToolResponse,  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-                _items = _parsed_response.records
-                _has_next = True
-
-                async def _get_next():
-                    return await self.list(
-                        page=page + 1,
-                        size=size,
-                        name=name,
-                        user_filter=user_filter,
-                        sort_by=sort_by,
-                        order=order,
-                        request_options=request_options,
-                    )
-
-                return AsyncPager(
-                    has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
-                )
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def upsert(
-        self,
-        *,
-        path: typing.Optional[str] = OMIT,
-        id: typing.Optional[str] = OMIT,
-        function: typing.Optional[ToolFunctionParams] = OMIT,
-        source_code: typing.Optional[str] = OMIT,
-        setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
-        tool_type: typing.Optional[FilesToolType] = OMIT,
-        version_name: typing.Optional[str] = OMIT,
-        version_description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncHttpResponse[ToolResponse]:
-        """
-        Create a Tool or update it with a new version if it already exists.
-
-        Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
-
-        You can provide `version_name` and `version_description` to identify and describe your versions.
-        Version names must be unique within a Tool; attempting to create a version with a name
-        that already exists will result in a 409 Conflict error.
-
-        Parameters
-        ----------
-        path : typing.Optional[str]
-            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-        id : typing.Optional[str]
-            ID for an existing Tool.
-
-        function : typing.Optional[ToolFunctionParams]
-            Callable function specification of the Tool shown to the model for tool calling.
-
-        source_code : typing.Optional[str]
-            Source code of the Tool.
-
-        setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
-
-        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-            Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
-
-        tool_type : typing.Optional[FilesToolType]
-            Type of Tool.
-
-        version_name : typing.Optional[str]
-            Unique identifier for this Tool version. Each Tool can only have one version with a given name.
-
-        version_description : typing.Optional[str]
-            Description of the Version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncHttpResponse[ToolResponse]
-            Successful Response
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "tools",
-            method="POST",
-            json={
-                "path": path,
-                "id": id,
-                "function": convert_and_respect_annotation_metadata(
-                    object_=function, annotation=ToolFunctionParams, direction="write"
-                ),
-                "source_code": source_code,
-                "setup_values": setup_values,
-                "attributes": attributes,
-                "tool_type": tool_type,
-                "version_name": version_name,
-                "version_description": version_description,
-            },
-            headers={
-                "content-type": "application/json",
-            },
-            request_options=request_options,
-            omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                _data = typing.cast(
-                    ToolResponse,
-                    construct_type(
-                        type_=ToolResponse,  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-                return AsyncHttpResponse(response=_response, data=_data)
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def get(
-        self,
-        id: str,
-        *,
-        version_id: typing.Optional[str] = None,
-        environment: typing.Optional[str] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncHttpResponse[ToolResponse]:
-        """
-        Retrieve the Tool with the given ID.
-
-        By default, the deployed version of the Tool is returned. Use the query parameters
-        `version_id` or `environment` to target a specific version of the Tool.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Tool.
-
-        version_id : typing.Optional[str]
-            A specific Version ID of the Tool to retrieve.
-
-        environment : typing.Optional[str]
-            Name of the Environment to retrieve a deployed Version from.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
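To illustrate the `upsert` endpoint above: a sketch that creates a Tool, or adds a new version when the provided details differ from an existing one. It assumes a `client.tools.upsert` wrapper on an exported `Humanloop` client and that `function` accepts a JSON-Schema-style dict; all names and values are placeholders.

from humanloop import Humanloop  # assumed sync client export

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key
tool = client.tools.upsert(
    path="utils/calculator",  # reusing a path adds a version to that Tool
    function={
        "name": "calculator",
        "description": "Evaluate an arithmetic expression.",
        "parameters": {
            "type": "object",
            "properties": {"expression": {"type": "string"}},
            "required": ["expression"],
        },
    },
    version_name="v1",  # must be unique within the Tool; duplicates give 409
)
print(tool.id)  # assumes the Tool response exposes its ID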
- - Returns - ------- - AsyncHttpResponse[ToolResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete the Tool with the given ID. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def move( - self, - id: str, - *, - path: typing.Optional[str] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ToolResponse]: - """ - Move the Tool to a different path or change the name. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - path : typing.Optional[str] - Path of the Tool including the Tool name, which is used as a unique identifier. - - name : typing.Optional[str] - Name of the Tool, which is used as a unique identifier. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[ToolResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_versions( - self, - id: str, - *, - evaluator_aggregates: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ListTools]: - """ - Get a list of all the versions of a Tool. - - Parameters - ---------- - id : str - Unique identifier for the Tool. - - evaluator_aggregates : typing.Optional[bool] - Whether to include Evaluator aggregate results for the versions in the response - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[ListTools] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ListTools, - construct_type( - type_=ListTools, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_tool_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Delete a version of the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - version_id : str - Unique identifier for the specific version of the Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
-
-        Returns
-        -------
-        AsyncHttpResponse[None]
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
-            method="DELETE",
-            request_options=request_options,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                return AsyncHttpResponse(response=_response, data=None)
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def update_tool_version(
-        self,
-        id: str,
-        version_id: str,
-        *,
-        name: typing.Optional[str] = OMIT,
-        description: typing.Optional[str] = OMIT,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> AsyncHttpResponse[ToolResponse]:
-        """
-        Update the name or description of the Tool version.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Tool.
-
-        version_id : str
-            Unique identifier for the specific version of the Tool.
-
-        name : typing.Optional[str]
-            Name of the version.
-
-        description : typing.Optional[str]
-            Description of the version.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        AsyncHttpResponse[ToolResponse]
-            Successful Response
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
-            method="PATCH",
-            json={
-                "name": name,
-                "description": description,
-            },
-            headers={
-                "content-type": "application/json",
-            },
-            request_options=request_options,
-            omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                _data = typing.cast(
-                    ToolResponse,
-                    construct_type(
-                        type_=ToolResponse,  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-                return AsyncHttpResponse(response=_response, data=_data)
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    headers=dict(_response.headers),
-                    body=typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    ),
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
-        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
-
-    async def set_deployment(
-        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
-    ) -> AsyncHttpResponse[ToolResponse]:
-        """
-        Deploy the Tool to an Environment.
-
-        Set the deployed version for the specified Environment. This version
-        will be used for calls made to the Tool in this Environment.
-
-        Parameters
-        ----------
-        id : str
-            Unique identifier for Tool.
-
-        environment_id : str
-            Unique identifier for the Environment to deploy the Version to.
-
-        version_id : str
-            Unique identifier for the specific version of the Tool.
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
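A sketch of pinning a version to an Environment through the `set_deployment` endpoint above, assuming a `client.tools.set_deployment` wrapper on the exported `Humanloop` client; all identifiers are placeholders.

from humanloop import Humanloop  # assumed sync client export

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key
# After this call, requests routed through the Environment use this version.
deployed = client.tools.set_deployment(
    id="tl_abc123",            # placeholder Tool ID
    environment_id="env_xyz",  # placeholder Environment ID
    version_id="tv_def456",    # placeholder Tool version ID
)
print(deployed.path)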
- - Returns - ------- - AsyncHttpResponse[ToolResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def remove_deployment( - self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[None]: - """ - Remove deployed Tool from the Environment. - - Remove the deployed version for the specified Environment. This Tool - will no longer be used for calls made to the Tool in this Environment. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - environment_id : str - Unique identifier for the Environment to remove the deployment from. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[None] - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return AsyncHttpResponse(response=_response, data=None) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def list_environments( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: - """ - List all Environments and their deployed versions for the Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentResponse]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def update_monitoring( - self, - id: str, - *, - activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, - deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[ToolResponse]: - """ - Activate and deactivate Evaluators for monitoring the Tool. - - An activated Evaluator will automatically be run on all new Logs - within the Tool for monitoring purposes. - - Parameters - ---------- - id : str - - activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] - Evaluators to activate for Monitoring. These will be automatically run on new Logs. - - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
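To make the activate/deactivate payloads above concrete: a sketch assuming the wrapper is `client.tools.update_monitoring` and that an activation item may be keyed by `evaluator_version_id`, in line with the `MonitoringEvaluatorVersionRequest` type removed elsewhere in this patch; all IDs are placeholders.

from humanloop import Humanloop  # assumed sync client export

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key
# New Logs on this Tool will now be scored by the activated Evaluator
# version and no longer by the deactivated one.
updated = client.tools.update_monitoring(
    id="tl_abc123",  # placeholder Tool ID
    activate=[{"evaluator_version_id": "evv_new"}],   # assumed item shape
    deactivate=[{"evaluator_version_id": "evv_old"}],
)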
- - Returns - ------- - AsyncHttpResponse[ToolResponse] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def get_environment_variables( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environment-variables", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentVariableRequest], - construct_type( - type_=typing.List[FileEnvironmentVariableRequest], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def add_environment_variable( - self, - id: str, - *, - request: typing.Sequence[FileEnvironmentVariableRequestParams], - request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: - """ - Add an environment variable to a Tool. - - Parameters - ---------- - id : str - Unique identifier for Tool. - - request : typing.Sequence[FileEnvironmentVariableRequestParams] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
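A round-trip sketch for the environment-variable endpoints above, assuming `client.tools.*` wrappers and that `FileEnvironmentVariableRequestParams` is a dict with `name` and `value` keys; the Tool ID and values are placeholders.

from humanloop import Humanloop  # assumed sync client export

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key
tool_id = "tl_abc123"  # placeholder Tool ID
# Add a variable, confirm it is present, then remove it again.
client.tools.add_environment_variable(
    id=tool_id,
    request=[{"name": "SERVICE_TOKEN", "value": "secret-value"}],  # assumed shape
)
for var in client.tools.get_environment_variables(id=tool_id):
    print(var.name)  # assumes each entry exposes its name
client.tools.delete_environment_variable(id=tool_id, name="SERVICE_TOKEN")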
- - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environment-variables", - method="POST", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" - ), - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentVariableRequest], - construct_type( - type_=typing.List[FileEnvironmentVariableRequest], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) - - async def delete_environment_variable( - self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: - """ - Parameters - ---------- - id : str - Unique identifier for File. - - name : str - Name of the Environment Variable to delete. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] - Successful Response - """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - _data = typing.cast( - typing.List[FileEnvironmentVariableRequest], - construct_type( - type_=typing.List[FileEnvironmentVariableRequest], # type: ignore - object_=_response.json(), - ), - ) - return AsyncHttpResponse(response=_response, data=_data) - if _response.status_code == 422: - raise UnprocessableEntityError( - headers=dict(_response.headers), - body=typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) - raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py deleted file mode 100644 index 5662ea6d..00000000 --- a/src/humanloop/types/__init__.py +++ /dev/null @@ -1,411 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -# isort: skip_file - -from .agent_call_response import AgentCallResponse -from .agent_call_response_tool_choice import AgentCallResponseToolChoice -from .agent_call_stream_response import AgentCallStreamResponse -from .agent_call_stream_response_payload import AgentCallStreamResponsePayload -from .agent_config_response import AgentConfigResponse -from .agent_continue_call_response import AgentContinueCallResponse -from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice -from .agent_continue_call_stream_response import AgentContinueCallStreamResponse -from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload -from .agent_inline_tool import AgentInlineTool -from .agent_kernel_request import AgentKernelRequest -from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort -from .agent_kernel_request_stop import AgentKernelRequestStop -from .agent_kernel_request_template import AgentKernelRequestTemplate -from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem -from .agent_linked_file_request import AgentLinkedFileRequest -from .agent_linked_file_response import AgentLinkedFileResponse -from .agent_linked_file_response_file import AgentLinkedFileResponseFile -from .agent_log_response import AgentLogResponse -from .agent_log_response_tool_choice import AgentLogResponseToolChoice -from .agent_log_stream_response import AgentLogStreamResponse -from .agent_response import AgentResponse -from .agent_response_reasoning_effort import AgentResponseReasoningEffort -from .agent_response_stop import AgentResponseStop -from .agent_response_template import AgentResponseTemplate -from .agent_response_tools_item import AgentResponseToolsItem -from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent -from .anthropic_thinking_content import AnthropicThinkingContent -from .base_models_user_response import BaseModelsUserResponse -from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse -from .chat_message import ChatMessage -from .chat_message_content import ChatMessageContent -from .chat_message_content_item import ChatMessageContentItem -from .chat_message_thinking_item import ChatMessageThinkingItem -from .chat_role import ChatRole -from .chat_tool_type import ChatToolType -from .code_evaluator_request import CodeEvaluatorRequest -from .config_tool_response import ConfigToolResponse -from .create_agent_log_response import CreateAgentLogResponse -from .create_datapoint_request import CreateDatapointRequest -from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue -from .create_evaluator_log_response import CreateEvaluatorLogResponse -from .create_flow_log_response import CreateFlowLogResponse -from .create_prompt_log_response import CreatePromptLogResponse -from .create_tool_log_response import CreateToolLogResponse -from .dashboard_configuration import DashboardConfiguration -from .datapoint_response import DatapointResponse -from .datapoint_response_target_value import DatapointResponseTargetValue -from .dataset_response import DatasetResponse -from .datasets_request import DatasetsRequest -from .directory_response import DirectoryResponse -from .directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse -from .directory_with_parents_and_children_response_files_item import DirectoryWithParentsAndChildrenResponseFilesItem -from .environment_response import EnvironmentResponse -from 
.environment_tag import EnvironmentTag -from .evaluatee_request import EvaluateeRequest -from .evaluatee_response import EvaluateeResponse -from .evaluation_evaluator_response import EvaluationEvaluatorResponse -from .evaluation_log_response import EvaluationLogResponse -from .evaluation_response import EvaluationResponse -from .evaluation_run_response import EvaluationRunResponse -from .evaluation_runs_response import EvaluationRunsResponse -from .evaluation_stats import EvaluationStats -from .evaluation_status import EvaluationStatus -from .evaluations_dataset_request import EvaluationsDatasetRequest -from .evaluations_request import EvaluationsRequest -from .evaluator_activation_deactivation_request import EvaluatorActivationDeactivationRequest -from .evaluator_activation_deactivation_request_activate_item import EvaluatorActivationDeactivationRequestActivateItem -from .evaluator_activation_deactivation_request_deactivate_item import ( - EvaluatorActivationDeactivationRequestDeactivateItem, -) -from .evaluator_aggregate import EvaluatorAggregate -from .evaluator_arguments_type import EvaluatorArgumentsType -from .evaluator_config_response import EvaluatorConfigResponse -from .evaluator_file_id import EvaluatorFileId -from .evaluator_file_path import EvaluatorFilePath -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse -from .evaluator_log_response import EvaluatorLogResponse -from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment -from .evaluator_response import EvaluatorResponse -from .evaluator_response_spec import EvaluatorResponseSpec -from .evaluator_return_type_enum import EvaluatorReturnTypeEnum -from .evaluator_version_id import EvaluatorVersionId -from .evaluators_request import EvaluatorsRequest -from .event_type import EventType -from .external_evaluator_request import ExternalEvaluatorRequest -from .feedback_type import FeedbackType -from .file_environment_response import FileEnvironmentResponse -from .file_environment_response_file import FileEnvironmentResponseFile -from .file_environment_variable_request import FileEnvironmentVariableRequest -from .file_id import FileId -from .file_path import FilePath -from .file_request import FileRequest -from .file_sort_by import FileSortBy -from .file_type import FileType -from .files_tool_type import FilesToolType -from .flow_kernel_request import FlowKernelRequest -from .flow_log_response import FlowLogResponse -from .flow_response import FlowResponse -from .function_tool import FunctionTool -from .function_tool_choice import FunctionToolChoice -from .http_validation_error import HttpValidationError -from .human_evaluator_request import HumanEvaluatorRequest -from .human_evaluator_request_return_type import HumanEvaluatorRequestReturnType -from .image_chat_content import ImageChatContent -from .image_url import ImageUrl -from .image_url_detail import ImageUrlDetail -from .input_response import InputResponse -from .linked_file_request import LinkedFileRequest -from .linked_tool_response import LinkedToolResponse -from .list_agents import ListAgents -from .list_datasets import ListDatasets -from .list_evaluators import ListEvaluators -from .list_flows import ListFlows -from .list_prompts import ListPrompts -from .list_tools import ListTools -from .llm_evaluator_request import LlmEvaluatorRequest -from .log_response import LogResponse -from .log_status import LogStatus -from .log_stream_response import 
LogStreamResponse -from .model_endpoints import ModelEndpoints -from .model_providers import ModelProviders -from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest -from .monitoring_evaluator_response import MonitoringEvaluatorResponse -from .monitoring_evaluator_state import MonitoringEvaluatorState -from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest -from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse -from .observability_status import ObservabilityStatus -from .on_agent_call_enum import OnAgentCallEnum -from .open_ai_reasoning_effort import OpenAiReasoningEffort -from .overall_stats import OverallStats -from .paginated_data_agent_response import PaginatedDataAgentResponse -from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse -from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse -from .paginated_data_flow_response import PaginatedDataFlowResponse -from .paginated_data_log_response import PaginatedDataLogResponse -from .paginated_data_prompt_response import PaginatedDataPromptResponse -from .paginated_data_tool_response import PaginatedDataToolResponse -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, -) -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, -) -from .paginated_datapoint_response import PaginatedDatapointResponse -from .paginated_dataset_response import PaginatedDatasetResponse -from .paginated_evaluation_response import PaginatedEvaluationResponse -from .paginated_prompt_log_response import PaginatedPromptLogResponse -from .paginated_session_response import PaginatedSessionResponse -from .platform_access_enum import PlatformAccessEnum -from .populate_template_response import PopulateTemplateResponse -from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate -from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort -from .populate_template_response_stop import PopulateTemplateResponseStop -from .populate_template_response_template import PopulateTemplateResponseTemplate -from .prompt_call_log_response import PromptCallLogResponse -from .prompt_call_response import PromptCallResponse -from .prompt_call_response_tool_choice import PromptCallResponseToolChoice -from .prompt_call_stream_response import PromptCallStreamResponse -from .prompt_kernel_request import PromptKernelRequest -from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort -from .prompt_kernel_request_stop import PromptKernelRequestStop -from .prompt_kernel_request_template import PromptKernelRequestTemplate -from .prompt_log_response import PromptLogResponse -from .prompt_log_response_tool_choice import PromptLogResponseToolChoice -from .prompt_response import PromptResponse -from .prompt_response_reasoning_effort import PromptResponseReasoningEffort -from .prompt_response_stop import PromptResponseStop -from .prompt_response_template import PromptResponseTemplate -from .provider_api_keys import ProviderApiKeys -from .response_format import ResponseFormat -from 
.response_format_type import ResponseFormatType -from .run_stats_response import RunStatsResponse -from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItem -from .run_version_response import RunVersionResponse -from .select_evaluator_stats_response import SelectEvaluatorStatsResponse -from .sort_order import SortOrder -from .template_language import TemplateLanguage -from .text_chat_content import TextChatContent -from .text_evaluator_stats_response import TextEvaluatorStatsResponse -from .time_unit import TimeUnit -from .tool_call import ToolCall -from .tool_call_response import ToolCallResponse -from .tool_choice import ToolChoice -from .tool_function import ToolFunction -from .tool_kernel_request import ToolKernelRequest -from .tool_log_response import ToolLogResponse -from .tool_response import ToolResponse -from .update_dateset_action import UpdateDatesetAction -from .update_evaluation_status_request import UpdateEvaluationStatusRequest -from .update_version_request import UpdateVersionRequest -from .user_response import UserResponse -from .valence import Valence -from .validation_error import ValidationError -from .validation_error_loc_item import ValidationErrorLocItem -from .version_deployment_response import VersionDeploymentResponse -from .version_deployment_response_file import VersionDeploymentResponseFile -from .version_id import VersionId -from .version_id_response import VersionIdResponse -from .version_id_response_version import VersionIdResponseVersion -from .version_reference_response import VersionReferenceResponse -from .version_stats_response import VersionStatsResponse -from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem -from .version_status import VersionStatus - -__all__ = [ - "AgentCallResponse", - "AgentCallResponseToolChoice", - "AgentCallStreamResponse", - "AgentCallStreamResponsePayload", - "AgentConfigResponse", - "AgentContinueCallResponse", - "AgentContinueCallResponseToolChoice", - "AgentContinueCallStreamResponse", - "AgentContinueCallStreamResponsePayload", - "AgentInlineTool", - "AgentKernelRequest", - "AgentKernelRequestReasoningEffort", - "AgentKernelRequestStop", - "AgentKernelRequestTemplate", - "AgentKernelRequestToolsItem", - "AgentLinkedFileRequest", - "AgentLinkedFileResponse", - "AgentLinkedFileResponseFile", - "AgentLogResponse", - "AgentLogResponseToolChoice", - "AgentLogStreamResponse", - "AgentResponse", - "AgentResponseReasoningEffort", - "AgentResponseStop", - "AgentResponseTemplate", - "AgentResponseToolsItem", - "AnthropicRedactedThinkingContent", - "AnthropicThinkingContent", - "BaseModelsUserResponse", - "BooleanEvaluatorStatsResponse", - "ChatMessage", - "ChatMessageContent", - "ChatMessageContentItem", - "ChatMessageThinkingItem", - "ChatRole", - "ChatToolType", - "CodeEvaluatorRequest", - "ConfigToolResponse", - "CreateAgentLogResponse", - "CreateDatapointRequest", - "CreateDatapointRequestTargetValue", - "CreateEvaluatorLogResponse", - "CreateFlowLogResponse", - "CreatePromptLogResponse", - "CreateToolLogResponse", - "DashboardConfiguration", - "DatapointResponse", - "DatapointResponseTargetValue", - "DatasetResponse", - "DatasetsRequest", - "DirectoryResponse", - "DirectoryWithParentsAndChildrenResponse", - "DirectoryWithParentsAndChildrenResponseFilesItem", - "EnvironmentResponse", - "EnvironmentTag", - "EvaluateeRequest", - "EvaluateeResponse", - "EvaluationEvaluatorResponse", - "EvaluationLogResponse", - "EvaluationResponse", - 
"EvaluationRunResponse", - "EvaluationRunsResponse", - "EvaluationStats", - "EvaluationStatus", - "EvaluationsDatasetRequest", - "EvaluationsRequest", - "EvaluatorActivationDeactivationRequest", - "EvaluatorActivationDeactivationRequestActivateItem", - "EvaluatorActivationDeactivationRequestDeactivateItem", - "EvaluatorAggregate", - "EvaluatorArgumentsType", - "EvaluatorConfigResponse", - "EvaluatorFileId", - "EvaluatorFilePath", - "EvaluatorJudgmentNumberLimit", - "EvaluatorJudgmentOptionResponse", - "EvaluatorLogResponse", - "EvaluatorLogResponseJudgment", - "EvaluatorResponse", - "EvaluatorResponseSpec", - "EvaluatorReturnTypeEnum", - "EvaluatorVersionId", - "EvaluatorsRequest", - "EventType", - "ExternalEvaluatorRequest", - "FeedbackType", - "FileEnvironmentResponse", - "FileEnvironmentResponseFile", - "FileEnvironmentVariableRequest", - "FileId", - "FilePath", - "FileRequest", - "FileSortBy", - "FileType", - "FilesToolType", - "FlowKernelRequest", - "FlowLogResponse", - "FlowResponse", - "FunctionTool", - "FunctionToolChoice", - "HttpValidationError", - "HumanEvaluatorRequest", - "HumanEvaluatorRequestReturnType", - "ImageChatContent", - "ImageUrl", - "ImageUrlDetail", - "InputResponse", - "LinkedFileRequest", - "LinkedToolResponse", - "ListAgents", - "ListDatasets", - "ListEvaluators", - "ListFlows", - "ListPrompts", - "ListTools", - "LlmEvaluatorRequest", - "LogResponse", - "LogStatus", - "LogStreamResponse", - "ModelEndpoints", - "ModelProviders", - "MonitoringEvaluatorEnvironmentRequest", - "MonitoringEvaluatorResponse", - "MonitoringEvaluatorState", - "MonitoringEvaluatorVersionRequest", - "NumericEvaluatorStatsResponse", - "ObservabilityStatus", - "OnAgentCallEnum", - "OpenAiReasoningEffort", - "OverallStats", - "PaginatedDataAgentResponse", - "PaginatedDataEvaluationLogResponse", - "PaginatedDataEvaluatorResponse", - "PaginatedDataFlowResponse", - "PaginatedDataLogResponse", - "PaginatedDataPromptResponse", - "PaginatedDataToolResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", - "PaginatedDatapointResponse", - "PaginatedDatasetResponse", - "PaginatedEvaluationResponse", - "PaginatedPromptLogResponse", - "PaginatedSessionResponse", - "PlatformAccessEnum", - "PopulateTemplateResponse", - "PopulateTemplateResponsePopulatedTemplate", - "PopulateTemplateResponseReasoningEffort", - "PopulateTemplateResponseStop", - "PopulateTemplateResponseTemplate", - "PromptCallLogResponse", - "PromptCallResponse", - "PromptCallResponseToolChoice", - "PromptCallStreamResponse", - "PromptKernelRequest", - "PromptKernelRequestReasoningEffort", - "PromptKernelRequestStop", - "PromptKernelRequestTemplate", - "PromptLogResponse", - "PromptLogResponseToolChoice", - "PromptResponse", - "PromptResponseReasoningEffort", - "PromptResponseStop", - "PromptResponseTemplate", - "ProviderApiKeys", - "ResponseFormat", - "ResponseFormatType", - "RunStatsResponse", - "RunStatsResponseEvaluatorStatsItem", - "RunVersionResponse", - "SelectEvaluatorStatsResponse", - "SortOrder", - "TemplateLanguage", - "TextChatContent", - "TextEvaluatorStatsResponse", - "TimeUnit", - "ToolCall", - "ToolCallResponse", - "ToolChoice", - "ToolFunction", - "ToolKernelRequest", - "ToolLogResponse", - "ToolResponse", - "UpdateDatesetAction", - "UpdateEvaluationStatusRequest", - "UpdateVersionRequest", - "UserResponse", - "Valence", - "ValidationError", - 
"ValidationErrorLocItem", - "VersionDeploymentResponse", - "VersionDeploymentResponseFile", - "VersionId", - "VersionIdResponse", - "VersionIdResponseVersion", - "VersionReferenceResponse", - "VersionStatsResponse", - "VersionStatsResponseEvaluatorVersionStatsItem", - "VersionStatus", -] diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py deleted file mode 100644 index 9bf3bb70..00000000 --- a/src/humanloop/types/agent_call_response.py +++ /dev/null @@ -1,231 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_call_response_tool_choice import AgentCallResponseToolChoice -from .chat_message import ChatMessage -from .log_status import LogStatus - - -class AgentCallResponse(UncheckedBaseModel): - """ - Response model for a Agent call. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None) - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - agent: "AgentResponse" = pydantic.Field() - """ - Agent that generated the Log. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. 
- """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. - """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. 
- """ - - previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(AgentCallResponse) diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py deleted file mode 100644 index 2d5a032d..00000000 --- a/src/humanloop/types/agent_call_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoice - -AgentCallResponseToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py deleted file mode 100644 index c7fa9e1c..00000000 --- a/src/humanloop/types/agent_call_stream_response.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_call_stream_response_payload import AgentCallStreamResponsePayload -from .event_type import EventType - - -class AgentCallStreamResponse(UncheckedBaseModel): - """ - Response model for calling Agent in streaming mode. 
- """ - - log_id: str - message: str - payload: typing.Optional[AgentCallStreamResponsePayload] = None - type: EventType - created_at: dt.datetime - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(AgentCallStreamResponse) diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py deleted file mode 100644 index 38120e12..00000000 --- a/src/humanloop/types/agent_call_stream_response_payload.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .log_response import LogResponse -from .log_stream_response import LogStreamResponse -from .tool_call import ToolCall - -AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall] diff --git a/src/humanloop/types/agent_config_response.py b/src/humanloop/types/agent_config_response.py deleted file mode 100644 index ba346181..00000000 --- a/src/humanloop/types/agent_config_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class AgentConfigResponse(UncheckedBaseModel): - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_continue_call_response.py b/src/humanloop/types/agent_continue_call_response.py deleted file mode 100644 index be988d07..00000000 --- a/src/humanloop/types/agent_continue_call_response.py +++ /dev/null @@ -1,231 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice -from .chat_message import ChatMessage -from .log_status import LogStatus - - -class AgentContinueCallResponse(UncheckedBaseModel): - """ - Response model for continuing an Agent call. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing.Optional[AgentContinueCallResponseToolChoice] = pydantic.Field(default=None) - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - agent: "AgentResponse" = pydantic.Field() - """ - Agent that generated the Log. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. 
- """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. - """ - - previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(AgentContinueCallResponse) diff --git a/src/humanloop/types/agent_continue_call_response_tool_choice.py b/src/humanloop/types/agent_continue_call_response_tool_choice.py deleted file mode 100644 index 731cf6b2..00000000 --- a/src/humanloop/types/agent_continue_call_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoice - -AgentContinueCallResponseToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/types/agent_continue_call_stream_response.py b/src/humanloop/types/agent_continue_call_stream_response.py deleted file mode 100644 index 39f7642d..00000000 --- a/src/humanloop/types/agent_continue_call_stream_response.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload -from .event_type import EventType - - -class AgentContinueCallStreamResponse(UncheckedBaseModel): - """ - Response model for continuing an Agent call in streaming mode. 
- """ - - log_id: str - message: str - payload: typing.Optional[AgentContinueCallStreamResponsePayload] = None - type: EventType - created_at: dt.datetime - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(AgentContinueCallStreamResponse) diff --git a/src/humanloop/types/agent_continue_call_stream_response_payload.py b/src/humanloop/types/agent_continue_call_stream_response_payload.py deleted file mode 100644 index 1d51d8d2..00000000 --- a/src/humanloop/types/agent_continue_call_stream_response_payload.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .log_response import LogResponse -from .log_stream_response import LogStreamResponse -from .tool_call import ToolCall - -AgentContinueCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall] diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py deleted file mode 100644 index 466a0b2d..00000000 --- a/src/humanloop/types/agent_inline_tool.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .on_agent_call_enum import OnAgentCallEnum -from .tool_function import ToolFunction - - -class AgentInlineTool(UncheckedBaseModel): - type: typing.Literal["inline"] = "inline" - json_schema: ToolFunction - on_agent_call: typing.Optional[OnAgentCallEnum] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py deleted file mode 100644 index 9cd36a6a..00000000 --- a/src/humanloop/types/agent_kernel_request.py +++ /dev/null @@ -1,123 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort -from .agent_kernel_request_stop import AgentKernelRequestStop -from .agent_kernel_request_template import AgentKernelRequestTemplate -from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem -from .model_endpoints import ModelEndpoints -from .model_providers import ModelProviders -from .response_format import ResponseFormat -from .template_language import TemplateLanguage - - -class AgentKernelRequest(UncheckedBaseModel): - """ - Base class used by both PromptKernelRequest and AgentKernelRequest. - - Contains the consistent Prompt-related fields. - """ - - model: str = pydantic.Field() - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) - """ - The provider model endpoint used. - """ - - template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None) - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) - """ - The template language to use for rendering the template. - """ - - provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) - """ - The company providing the underlying model service. - """ - - max_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - """ - - temperature: typing.Optional[float] = pydantic.Field(default=None) - """ - What sampling temperature to use when making a generation. Higher values means the model will be more creative. - """ - - top_p: typing.Optional[float] = pydantic.Field(default=None) - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None) - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Other parameter values to be passed to the provider call. 
- """ - - seed: typing.Optional[int] = pydantic.Field(default=None) - """ - If specified, model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None) - """ - Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ - - max_iterations: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py deleted file mode 100644 index 3a0d2d24..00000000 --- a/src/humanloop/types/agent_kernel_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .open_ai_reasoning_effort import OpenAiReasoningEffort - -AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py deleted file mode 100644 index e38c12e2..00000000 --- a/src/humanloop/types/agent_kernel_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentKernelRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py deleted file mode 100644 index 62f4d40f..00000000 --- a/src/humanloop/types/agent_kernel_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessage - -AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py deleted file mode 100644 index 043bb29b..00000000 --- a/src/humanloop/types/agent_kernel_request_tools_item.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .agent_inline_tool import AgentInlineTool -from .agent_linked_file_request import AgentLinkedFileRequest - -AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py deleted file mode 100644 index 5d110bad..00000000 --- a/src/humanloop/types/agent_linked_file_request.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .linked_file_request import LinkedFileRequest -from .on_agent_call_enum import OnAgentCallEnum - - -class AgentLinkedFileRequest(UncheckedBaseModel): - type: typing.Literal["file"] = "file" - link: LinkedFileRequest - on_agent_call: typing.Optional[OnAgentCallEnum] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py deleted file mode 100644 index 9788f37d..00000000 --- a/src/humanloop/types/agent_linked_file_response.py +++ /dev/null @@ -1,40 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .linked_file_request import LinkedFileRequest -from .on_agent_call_enum import OnAgentCallEnum - - -class AgentLinkedFileResponse(UncheckedBaseModel): - type: typing.Literal["file"] = "file" - link: LinkedFileRequest - on_agent_call: typing.Optional[OnAgentCallEnum] = None - file: typing.Optional["AgentLinkedFileResponseFile"] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402, F401, I001 - -update_forward_refs(AgentLinkedFileResponse) diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py deleted file mode 100644 index ab1b384e..00000000 --- a/src/humanloop/types/agent_linked_file_response_file.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -from .dataset_response import DatasetResponse - -if typing.TYPE_CHECKING: - from .agent_response import AgentResponse - from .evaluator_response import EvaluatorResponse - from .flow_response import FlowResponse - from .prompt_response import PromptResponse - from .tool_response import ToolResponse -AgentLinkedFileResponseFile = typing.Union[ - "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" -] diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py deleted file mode 100644 index 634ad4d0..00000000 --- a/src/humanloop/types/agent_log_response.py +++ /dev/null @@ -1,225 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_log_response_tool_choice import AgentLogResponseToolChoice -from .chat_message import ChatMessage -from .log_status import LogStatus - - -class AgentLogResponse(UncheckedBaseModel): - """ - General request for creating a Log - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None) - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - agent: "AgentResponse" = pydantic.Field() - """ - Agent that generated the Log. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. 
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. - """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received from the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations. - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to.
- """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(AgentLogResponse) diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py deleted file mode 100644 index bf642cf5..00000000 --- a/src/humanloop/types/agent_log_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoice - -AgentLogResponseToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py deleted file mode 100644 index fb577067..00000000 --- a/src/humanloop/types/agent_log_stream_response.py +++ /dev/null @@ -1,99 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage - - -class AgentLogStreamResponse(UncheckedBaseModel): - """ - Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. 
- """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - id: str = pydantic.Field() - """ - ID of the log. - """ - - agent_id: str = pydantic.Field() - """ - ID of the Agent the log belongs to. - """ - - version_id: str = pydantic.Field() - """ - ID of the specific version of the Agent. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py deleted file mode 100644 index cdc54812..00000000 --- a/src/humanloop/types/agent_response.py +++ /dev/null @@ -1,266 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -import typing_extensions -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.serialization import FieldMetadata -from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_response_reasoning_effort import AgentResponseReasoningEffort -from .agent_response_stop import AgentResponseStop -from .agent_response_template import AgentResponseTemplate -from .environment_response import EnvironmentResponse -from .evaluator_aggregate import EvaluatorAggregate -from .input_response import InputResponse -from .model_endpoints import ModelEndpoints -from .model_providers import ModelProviders -from .response_format import ResponseFormat -from .template_language import TemplateLanguage -from .user_response import UserResponse -from .version_status import VersionStatus - - -class AgentResponse(UncheckedBaseModel): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str = pydantic.Field() - """ - Path of the Agent, including the name, which is used as a unique identifier. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Agent. - """ - - directory_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the directory that the file is in on Humanloop. - """ - - model: str = pydantic.Field() - """ - The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) - """ - The provider model endpoint used. - """ - - template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None) - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) - """ - The template language to use for rendering the template. - """ - - provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) - """ - The company providing the underlying model service. - """ - - max_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - """ - - temperature: typing.Optional[float] = pydantic.Field(default=None) - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - """ - - top_p: typing.Optional[float] = pydantic.Field(default=None) - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None) - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing.Optional[int] = pydantic.Field(default=None) - """ - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None) - """ - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing.List["AgentResponseToolsItem"] = pydantic.Field() - """ - List of tools that the Agent can call. These can be linked files or inline tools.
- """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ - - max_iterations: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. - """ - - version_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique name for the Agent version. Version names must be unique for a given Agent. - """ - - version_description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the version, e.g., the changes made in this version. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Agent. - """ - - tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - List of tags associated with the file. - """ - - readme: typing.Optional[str] = pydantic.Field(default=None) - """ - Long description of the file. - """ - - name: str = pydantic.Field() - """ - Name of the Agent. - """ - - schema_: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") - ] = pydantic.Field(default=None) - """ - The JSON schema for the Prompt. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. - """ - - type: typing.Optional[typing.Literal["agent"]] = None - environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) - """ - The list of environments the Agent Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who created the Agent. - """ - - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Agent Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Agent Version was committed. - """ - - status: VersionStatus = pydantic.Field() - """ - The status of the Agent Version. - """ - - last_used_at: dt.datetime - version_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated for this Agent Version - """ - - total_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated across all Agent Versions - """ - - inputs: typing.List[InputResponse] = pydantic.Field() - """ - Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. - """ - - evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) - """ - Evaluators that have been attached to this Agent that are used for monitoring logs. - """ - - evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) - """ - Aggregation of Evaluator results for the Agent Version. - """ - - raw_file_content: typing.Optional[str] = pydantic.Field(default=None) - """ - The raw content of the Agent. Corresponds to the .agent file. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402, F401, I001 - -update_forward_refs(AgentResponse) diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py deleted file mode 100644 index b6fa28cd..00000000 --- a/src/humanloop/types/agent_response_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .open_ai_reasoning_effort import OpenAiReasoningEffort - -AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py deleted file mode 100644 index 5c3b6a48..00000000 --- a/src/humanloop/types/agent_response_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py deleted file mode 100644 index f5064815..00000000 --- a/src/humanloop/types/agent_response_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessage - -AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py deleted file mode 100644 index da6970e2..00000000 --- a/src/humanloop/types/agent_response_tools_item.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -from .agent_inline_tool import AgentInlineTool - -if typing.TYPE_CHECKING: - from .agent_linked_file_response import AgentLinkedFileResponse -AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool] diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py deleted file mode 100644 index 3e8e782e..00000000 --- a/src/humanloop/types/anthropic_redacted_thinking_content.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class AnthropicRedactedThinkingContent(UncheckedBaseModel):
-    type: typing.Literal["redacted_thinking"] = "redacted_thinking"
-    data: str = pydantic.Field()
-    """
-    Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
deleted file mode 100644
index f61501bd..00000000
--- a/src/humanloop/types/anthropic_thinking_content.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class AnthropicThinkingContent(UncheckedBaseModel):
-    type: typing.Literal["thinking"] = "thinking"
-    thinking: str = pydantic.Field()
-    """
-    Model's chain-of-thought for providing the response.
-    """
-
-    signature: str = pydantic.Field()
-    """
-    Cryptographic signature that verifies the thinking block was generated by Anthropic.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/base_models_user_response.py b/src/humanloop/types/base_models_user_response.py
deleted file mode 100644
index 8cd96829..00000000
--- a/src/humanloop/types/base_models_user_response.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-BaseModelsUserResponse = typing.Optional[typing.Any]
diff --git a/src/humanloop/types/boolean_evaluator_stats_response.py b/src/humanloop/types/boolean_evaluator_stats_response.py
deleted file mode 100644
index 9452d923..00000000
--- a/src/humanloop/types/boolean_evaluator_stats_response.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class BooleanEvaluatorStatsResponse(UncheckedBaseModel):
-    """
-    Base attributes for stats for an Evaluator Version-Evaluated Version pair
-    in the Evaluation.
-    """
-
-    evaluator_version_id: str = pydantic.Field()
-    """
-    Unique identifier for the Evaluator Version.
-    """
-
-    total_logs: int = pydantic.Field()
-    """
-    The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors.
-    """
-
-    num_judgments: int = pydantic.Field()
-    """
-    The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors.
-    """
-
-    num_nulls: int = pydantic.Field()
-    """
-    The total number of null judgments (i.e. abstentions) for this Evaluator Version.
-    """
-
-    num_errors: int = pydantic.Field()
-    """
-    The total number of errored Evaluators for this Evaluator Version.
-    """
-
-    num_true: int = pydantic.Field()
-    """
-    The total number of `True` judgments for this Evaluator Version.
-    """
-
-    num_false: int = pydantic.Field()
-    """
-    The total number of `False` judgments for this Evaluator Version.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
deleted file mode 100644
index 348752b5..00000000
--- a/src/humanloop/types/chat_message.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .chat_message_content import ChatMessageContent
-from .chat_message_thinking_item import ChatMessageThinkingItem
-from .chat_role import ChatRole
-from .tool_call import ToolCall
-
-
-class ChatMessage(UncheckedBaseModel):
-    content: typing.Optional[ChatMessageContent] = pydantic.Field(default=None)
-    """
-    The content of the message.
-    """
-
-    name: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Optional name of the message author.
-    """
-
-    tool_call_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Tool call that this message is responding to.
-    """
-
-    role: ChatRole = pydantic.Field()
-    """
-    Role of the message author.
-    """
-
-    tool_calls: typing.Optional[typing.List[ToolCall]] = pydantic.Field(default=None)
-    """
-    A list of tool calls requested by the assistant.
-    """
-
-    thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None)
-    """
-    Model's chain-of-thought for providing the response. Present on assistant messages if the model supports it.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message_content.py b/src/humanloop/types/chat_message_content.py
deleted file mode 100644
index fd31fa21..00000000
--- a/src/humanloop/types/chat_message_content.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .chat_message_content_item import ChatMessageContentItem
-
-ChatMessageContent = typing.Union[str, typing.List[ChatMessageContentItem]]
diff --git a/src/humanloop/types/chat_message_content_item.py b/src/humanloop/types/chat_message_content_item.py
deleted file mode 100644
index 1d27b28d..00000000
--- a/src/humanloop/types/chat_message_content_item.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .image_chat_content import ImageChatContent
-from .text_chat_content import TextChatContent
-
-ChatMessageContentItem = typing.Union[TextChatContent, ImageChatContent]
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
deleted file mode 100644
index 2885c825..00000000
--- a/src/humanloop/types/chat_message_thinking_item.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
-from .anthropic_thinking_content import AnthropicThinkingContent
-
-ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
diff --git a/src/humanloop/types/chat_role.py b/src/humanloop/types/chat_role.py
deleted file mode 100644
index b5f6b1da..00000000
--- a/src/humanloop/types/chat_role.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ChatRole = typing.Union[typing.Literal["user", "assistant", "system", "tool", "developer"], typing.Any]
diff --git a/src/humanloop/types/chat_tool_type.py b/src/humanloop/types/chat_tool_type.py
deleted file mode 100644
index 8e488088..00000000
--- a/src/humanloop/types/chat_tool_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ChatToolType = typing.Literal["function"]
diff --git a/src/humanloop/types/code_evaluator_request.py b/src/humanloop/types/code_evaluator_request.py
deleted file mode 100644
index e8c574f9..00000000
--- a/src/humanloop/types/code_evaluator_request.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .evaluator_arguments_type import EvaluatorArgumentsType
-from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
-from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
-from .valence import Valence
-
-
-class CodeEvaluatorRequest(UncheckedBaseModel):
-    arguments_type: EvaluatorArgumentsType = pydantic.Field()
-    """
-    Whether this Evaluator is target-free or target-required.
-    """
-
-    return_type: EvaluatorReturnTypeEnum = pydantic.Field()
-    """
-    The type of the return value of the Evaluator.
-    """
-
-    attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
-    """
-    Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used.
-    """
-
-    options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None)
-    """
-    The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'.
-    """
-
-    number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None)
-    """
-    Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'.
-    """
-
-    number_valence: typing.Optional[Valence] = pydantic.Field(default=None)
-    """
-    The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better.
-    """
-
-    evaluator_type: typing.Literal["python"] = "python"
-    code: str = pydantic.Field()
-    """
-    The code for the Evaluator. This code will be executed in a sandboxed environment.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/config_tool_response.py b/src/humanloop/types/config_tool_response.py
deleted file mode 100644
index 7ed7682f..00000000
--- a/src/humanloop/types/config_tool_response.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ConfigToolResponse = typing.Optional[typing.Any]
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
deleted file mode 100644
index 2fe74aa4..00000000
--- a/src/humanloop/types/create_agent_log_response.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .log_status import LogStatus
-
-
-class CreateAgentLogResponse(UncheckedBaseModel):
-    """
-    Response for an Agent Log.
-    """
-
-    id: str = pydantic.Field()
-    """
-    Unique identifier for the Log.
-    """
-
-    agent_id: str = pydantic.Field()
-    """
-    Unique identifier for the Agent.
-    """
-
-    version_id: str = pydantic.Field()
-    """
-    Unique identifier for the Agent Version.
-    """
-
-    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
-    """
-    Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/create_datapoint_request.py b/src/humanloop/types/create_datapoint_request.py
deleted file mode 100644
index 31f3e4f7..00000000
--- a/src/humanloop/types/create_datapoint_request.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .chat_message import ChatMessage
-from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
-
-
-class CreateDatapointRequest(UncheckedBaseModel):
-    inputs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None)
-    """
-    The inputs to the prompt template.
-    """
-
-    messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
-    """
-    List of chat messages to provide to the model.
-    """
-
-    target: typing.Optional[typing.Dict[str, CreateDatapointRequestTargetValue]] = pydantic.Field(default=None)
-    """
-    Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/create_datapoint_request_target_value.py b/src/humanloop/types/create_datapoint_request_target_value.py
deleted file mode 100644
index 92a371fa..00000000
--- a/src/humanloop/types/create_datapoint_request_target_value.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateDatapointRequestTargetValue = typing.Union[
-    str, int, float, bool, typing.List[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]]
-]
diff --git a/src/humanloop/types/create_evaluator_log_response.py b/src/humanloop/types/create_evaluator_log_response.py
deleted file mode 100644
index 9f917d3d..00000000
--- a/src/humanloop/types/create_evaluator_log_response.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class CreateEvaluatorLogResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    String identifier of the new Log.
-    """
-
-    parent_id: str = pydantic.Field()
-    """
-    Identifier of the evaluated parent Log.
-    """
-
-    session_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Identifier of the Session containing both the parent and the new child Log. If the parent Log does not belong to a Session, a new Session is created with this ID.
-    """
-
-    version_id: str = pydantic.Field()
-    """
-    Identifier of the Evaluator Version for which the Log was registered.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/create_flow_log_response.py b/src/humanloop/types/create_flow_log_response.py
deleted file mode 100644
index ae296a6f..00000000
--- a/src/humanloop/types/create_flow_log_response.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .log_status import LogStatus
-
-
-class CreateFlowLogResponse(UncheckedBaseModel):
-    """
-    Response for a Flow Log.
-    """
-
-    id: str = pydantic.Field()
-    """
-    Unique identifier for the Log.
-    """
-
-    flow_id: str = pydantic.Field()
-    """
-    Unique identifier for the Flow.
-    """
-
-    version_id: str = pydantic.Field()
-    """
-    Unique identifier for the Flow Version.
-    """
-
-    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
-    """
-    Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/create_prompt_log_response.py b/src/humanloop/types/create_prompt_log_response.py
deleted file mode 100644
index cd80d43b..00000000
--- a/src/humanloop/types/create_prompt_log_response.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class CreatePromptLogResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    String ID of log.
-    """
-
-    prompt_id: str = pydantic.Field()
-    """
-    ID of the Prompt the log belongs to.
-    """
-
-    version_id: str = pydantic.Field()
-    """
-    ID of the specific version of the Prompt.
-    """
-
-    session_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    String ID of session the log belongs to.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/create_tool_log_response.py b/src/humanloop/types/create_tool_log_response.py
deleted file mode 100644
index 6ba171fa..00000000
--- a/src/humanloop/types/create_tool_log_response.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class CreateToolLogResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    String ID of log.
-    """
-
-    tool_id: str = pydantic.Field()
-    """
-    ID of the Tool the log belongs to.
-    """
-
-    version_id: str = pydantic.Field()
-    """
-    ID of the specific version of the Tool.
-    """
-
-    session_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    String ID of session the log belongs to.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/dashboard_configuration.py b/src/humanloop/types/dashboard_configuration.py
deleted file mode 100644
index f5d752d8..00000000
--- a/src/humanloop/types/dashboard_configuration.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .time_unit import TimeUnit
-
-
-class DashboardConfiguration(UncheckedBaseModel):
-    time_unit: TimeUnit
-    time_range_days: int
-    model_config_ids: typing.List[str]
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/datapoint_response.py b/src/humanloop/types/datapoint_response.py
deleted file mode 100644
index 2eb4de68..00000000
--- a/src/humanloop/types/datapoint_response.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .chat_message import ChatMessage
-from .datapoint_response_target_value import DatapointResponseTargetValue
-
-
-class DatapointResponse(UncheckedBaseModel):
-    inputs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None)
-    """
-    The inputs to the prompt template.
-    """
-
-    messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
-    """
-    List of chat messages to provide to the model.
-    """
-
-    target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]] = pydantic.Field(default=None)
-    """
-    Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation.
-    """
-
-    id: str = pydantic.Field()
-    """
-    Unique identifier for the Datapoint. Starts with `dp_`.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/datapoint_response_target_value.py b/src/humanloop/types/datapoint_response_target_value.py
deleted file mode 100644
index c7f0f16e..00000000
--- a/src/humanloop/types/datapoint_response_target_value.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DatapointResponseTargetValue = typing.Union[
-    str, int, float, bool, typing.List[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]]
-]
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
deleted file mode 100644
index 9153374a..00000000
--- a/src/humanloop/types/dataset_response.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-import pydantic
-import typing_extensions
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.serialization import FieldMetadata
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .datapoint_response import DatapointResponse
-from .environment_response import EnvironmentResponse
-from .user_response import UserResponse
-
-
-class DatasetResponse(UncheckedBaseModel):
-    """
-    Base type that all File Responses should inherit from.
-
-    Attributes defined here are common to all File Responses and should be overridden
-    in the inheriting classes with documentation and appropriate Field definitions.
-    """
-
-    path: str = pydantic.Field()
-    """
-    Path of the Dataset, including the name, which is used as a unique identifier.
-    """
-
-    id: str = pydantic.Field()
-    """
-    Unique identifier for the Dataset. Starts with `ds_`.
-    """
-
-    directory_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    ID of the directory that the file is in on Humanloop.
-    """
-
-    name: str = pydantic.Field()
-    """
-    Name of the Dataset, which is used as a unique identifier.
-    """
-
-    description: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Description of the Dataset.
-    """
-
-    schema_: typing_extensions.Annotated[
-        typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
-    ] = pydantic.Field(default=None)
-    """
-    The JSON schema for the File.
-    """
-
-    readme: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Long description of the file.
-    """
-
-    tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
-    """
-    List of tags associated with the file.
-    """
-
-    version_id: str = pydantic.Field()
-    """
-    Unique identifier for the specific Dataset Version. If no query params provided, the default deployed Dataset Version is returned. Starts with `dsv_`.
-    """
-
-    type: typing.Optional[typing.Literal["dataset"]] = None
-    environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
-    """
-    The list of environments the Dataset Version is deployed to.
-    """
-
-    created_at: dt.datetime
-    updated_at: dt.datetime
-    created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
-    """
-    The user who created the Dataset.
-    """
-
-    last_used_at: dt.datetime
-    version_name: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Unique name for the Dataset version. Version names must be unique for a given Dataset.
-    """
-
-    version_description: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Description of the version, e.g., the changes made in this version.
-    """
-
-    datapoints_count: int = pydantic.Field()
-    """
-    The number of Datapoints in this Dataset version.
-    """
-
-    datapoints: typing.Optional[typing.List[DatapointResponse]] = pydantic.Field(default=None)
-    """
-    The list of Datapoints in this Dataset version. Only provided if explicitly requested.
-    """
-
-    attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
-    """
-    Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/datasets_request.py b/src/humanloop/types/datasets_request.py
deleted file mode 100644
index 84e126aa..00000000
--- a/src/humanloop/types/datasets_request.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DatasetsRequest = typing.Optional[typing.Any]
diff --git a/src/humanloop/types/directory_response.py b/src/humanloop/types/directory_response.py
deleted file mode 100644
index a56f0732..00000000
--- a/src/humanloop/types/directory_response.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class DirectoryResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    String ID of directory. Starts with `dir_`.
-    """
-
-    parent_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`.
-    """
-
-    name: str = pydantic.Field()
-    """
-    Name of the directory.
-    """
-
-    description: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Description of the directory.
-    """
-
-    path: str = pydantic.Field()
-    """
-    Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`.
-    """
-
-    readme: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Long description of the directory.
-    """
-
-    tags: typing.List[str] = pydantic.Field()
-    """
-    List of tags associated with the directory.
-    """
-
-    created_at: dt.datetime
-    updated_at: dt.datetime
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
deleted file mode 100644
index a04de500..00000000
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .directory_response import DirectoryResponse
-from .directory_with_parents_and_children_response_files_item import DirectoryWithParentsAndChildrenResponseFilesItem
-
-
-class DirectoryWithParentsAndChildrenResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    String ID of directory. Starts with `dir_`.
-    """
-
-    parent_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`.
-    """
-
-    name: str = pydantic.Field()
-    """
-    Name of the directory.
-    """
-
-    description: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Description of the directory.
-    """
-
-    path: str = pydantic.Field()
-    """
-    Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`.
-    """
-
-    readme: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Long description of the directory.
-    """
-
-    tags: typing.List[str] = pydantic.Field()
-    """
-    List of tags associated with the directory.
-    """
-
-    created_at: dt.datetime
-    updated_at: dt.datetime
-    subdirectories: typing.List[DirectoryResponse] = pydantic.Field()
-    """
-    List of subdirectories in the directory.
-    """
-
-    files: typing.List[DirectoryWithParentsAndChildrenResponseFilesItem] = pydantic.Field()
-    """
-    List of files in the directory.
-    """
-
-    parents: typing.List[DirectoryResponse] = pydantic.Field()
-    """
-    List of parent directories of the directory.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-
-update_forward_refs(DirectoryWithParentsAndChildrenResponse)
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
deleted file mode 100644
index 2c418d75..00000000
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .agent_response import AgentResponse
-from .dataset_response import DatasetResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-
-DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
-    PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
-]
diff --git a/src/humanloop/types/environment_response.py b/src/humanloop/types/environment_response.py
deleted file mode 100644
index 23c0ab8f..00000000
--- a/src/humanloop/types/environment_response.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .environment_tag import EnvironmentTag
-
-
-class EnvironmentResponse(UncheckedBaseModel):
-    id: str
-    created_at: dt.datetime
-    name: str
-    tag: EnvironmentTag
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/environment_tag.py b/src/humanloop/types/environment_tag.py
deleted file mode 100644
index f09bde15..00000000
--- a/src/humanloop/types/environment_tag.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EnvironmentTag = typing.Union[typing.Literal["default", "other"], typing.Any]
diff --git a/src/humanloop/types/evaluatee_request.py b/src/humanloop/types/evaluatee_request.py
deleted file mode 100644
index a51c07aa..00000000
--- a/src/humanloop/types/evaluatee_request.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class EvaluateeRequest(UncheckedBaseModel):
-    """
-    Specification of a File version on Humanloop.
-
-    This can be done in a couple of ways:
-    - Specifying `version_id` directly.
-    - Specifying a File (and optionally an Environment).
-    - A File can be specified by either `path` or `file_id`.
-    - An Environment can be specified by `environment_id`. If no Environment is specified, the default Environment is used.
-    """
-
-    version_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Unique identifier for the File Version. If provided, none of the other fields should be specified.
-    """
-
-    path: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Path identifying a File. Provide either this or `file_id` if you want to specify a File.
-    """
-
-    file_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Unique identifier for the File. Provide either this or `path` if you want to specify a File.
-    """
-
-    environment: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used.
-    """
-
-    batch_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Unique identifier for the batch of Logs to include in the Evaluation.
-    """
-
-    orchestrated: typing.Optional[bool] = pydantic.Field(default=None)
-    """
-    Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
deleted file mode 100644
index 0a2169e0..00000000
--- a/src/humanloop/types/evaluatee_response.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .run_version_response import RunVersionResponse
-
-
-class EvaluateeResponse(UncheckedBaseModel):
-    """
-    Version of the Evaluatee being evaluated.
-    """
-
-    version: typing.Optional[RunVersionResponse] = None
-    batch_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Unique identifier for the batch of Logs to include in the Evaluation.
-    """
-
-    orchestrated: bool = pydantic.Field()
-    """
-    Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API.
-    """
-
-    pinned: bool = pydantic.Field()
-    """
-    Pinned Evaluatees are shown in Humanloop's Overview, allowing you to use them as baselines for comparison.
-    """
-
-    added_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
-    """
-    When the Evaluatee was added to the Evaluation.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-
-update_forward_refs(EvaluateeResponse)
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
deleted file mode 100644
index c63ebb8d..00000000
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class EvaluationEvaluatorResponse(UncheckedBaseModel):
-    version: "EvaluatorResponse"
-    orchestrated: bool = pydantic.Field()
-    """
-    Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API.
-    """
-
-    added_at: dt.datetime = pydantic.Field()
-    """
-    When the Evaluator was added to the Evaluation.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-
-update_forward_refs(EvaluationEvaluatorResponse)
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
deleted file mode 100644
index bd2864f2..00000000
--- a/src/humanloop/types/evaluation_log_response.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .datapoint_response import DatapointResponse
-
-
-class EvaluationLogResponse(UncheckedBaseModel):
-    run_id: str = pydantic.Field()
-    """
-    Unique identifier for the Run.
-    """
-
-    datapoint: typing.Optional[DatapointResponse] = pydantic.Field(default=None)
-    """
-    The Datapoint used to generate the Log.
-    """
-
-    log: "LogResponse" = pydantic.Field()
-    """
-    The Log that was evaluated by the Evaluator.
-    """
-
-    evaluator_logs: typing.List["LogResponse"] = pydantic.Field()
-    """
-    The Evaluator Logs containing the judgments for the Log.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_log_response import AgentLogResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_log_response import EvaluatorLogResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_log_response import FlowLogResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_log_response import PromptLogResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_log_response import ToolLogResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-from .log_response import LogResponse  # noqa: E402, F401, I001
-
-update_forward_refs(EvaluationLogResponse)
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
deleted file mode 100644
index dc32e6dc..00000000
--- a/src/humanloop/types/evaluation_response.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .evaluation_evaluator_response import EvaluationEvaluatorResponse
-from .user_response import UserResponse
-
-
-class EvaluationResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    Unique identifier for the Evaluation. Starts with `evr`.
-    """
-
-    runs_count: int = pydantic.Field()
-    """
-    The total number of Runs in the Evaluation.
-    """
-
-    evaluators: typing.List[EvaluationEvaluatorResponse] = pydantic.Field()
-    """
-    The Evaluator Versions used to evaluate.
-    """
-
-    name: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Name of the Evaluation to help identify it. Must be unique among Evaluations associated with the File.
-    """
-
-    file_id: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    Unique identifier for the File associated with the Evaluation.
-    """
-
-    created_at: dt.datetime
-    created_by: typing.Optional[UserResponse] = None
-    updated_at: dt.datetime
-    url: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    URL to view the Evaluation on Humanloop.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-
-update_forward_refs(EvaluationResponse)
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
deleted file mode 100644
index d2977f63..00000000
--- a/src/humanloop/types/evaluation_run_response.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .dataset_response import DatasetResponse
-from .evaluation_status import EvaluationStatus
-from .run_version_response import RunVersionResponse
-from .user_response import UserResponse
-
-
-class EvaluationRunResponse(UncheckedBaseModel):
-    id: str = pydantic.Field()
-    """
-    Unique identifier for the Run.
-    """
-
-    dataset: typing.Optional[DatasetResponse] = pydantic.Field(default=None)
-    """
-    The Dataset used in the Run.
-    """
-
-    version: typing.Optional[RunVersionResponse] = pydantic.Field(default=None)
-    """
-    The version used in the Run.
-    """
-
-    orchestrated: bool = pydantic.Field()
-    """
-    Whether the Run is orchestrated by Humanloop.
-    """
-
-    added_at: dt.datetime = pydantic.Field()
-    """
-    When the Run was added to the Evaluation.
-    """
-
-    created_at: dt.datetime = pydantic.Field()
-    """
-    When the Run was created.
-    """
-
-    created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
-    """
-    The User who created the Run.
-    """
-
-    status: EvaluationStatus = pydantic.Field()
-    """
-    The status of the Run.
-    """
-
-    control: bool = pydantic.Field()
-    """
-    Stats for other Runs will be displayed in comparison to the control Run.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-
-update_forward_refs(EvaluationRunResponse)
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
deleted file mode 100644
index e815d1e7..00000000
--- a/src/humanloop/types/evaluation_runs_response.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .evaluation_run_response import EvaluationRunResponse
-
-
-class EvaluationRunsResponse(UncheckedBaseModel):
-    runs: typing.List[EvaluationRunResponse] = pydantic.Field()
-    """
-    The Runs in the Evaluation.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
-
-
-from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
-from .agent_response import AgentResponse  # noqa: E402, F401, I001
-from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
-from .flow_response import FlowResponse  # noqa: E402, F401, I001
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
-from .prompt_response import PromptResponse  # noqa: E402, F401, I001
-from .tool_response import ToolResponse  # noqa: E402, F401, I001
-from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
-from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
-
-update_forward_refs(EvaluationRunsResponse)
diff --git a/src/humanloop/types/evaluation_stats.py b/src/humanloop/types/evaluation_stats.py
deleted file mode 100644
index 656d45d0..00000000
--- a/src/humanloop/types/evaluation_stats.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .evaluation_status import EvaluationStatus
-from .run_stats_response import RunStatsResponse
-
-
-class EvaluationStats(UncheckedBaseModel):
-    run_stats: typing.List[RunStatsResponse] = pydantic.Field()
-    """
-    Stats for each Run in the Evaluation.
-    """
-
-    progress: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    A summary string report of the Evaluation's progress you can print to the command line; helpful when integrating Evaluations with CI/CD.
-    """
-
-    report: typing.Optional[str] = pydantic.Field(default=None)
-    """
-    A summary string report of the Evaluation you can print to the command line; helpful when integrating Evaluations with CI/CD.
-    """
-
-    status: EvaluationStatus = pydantic.Field()
-    """
-    The current status of the Evaluation.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/evaluation_status.py b/src/humanloop/types/evaluation_status.py
deleted file mode 100644
index 9e45efbe..00000000
--- a/src/humanloop/types/evaluation_status.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EvaluationStatus = typing.Union[typing.Literal["pending", "running", "completed", "cancelled"], typing.Any]
diff --git a/src/humanloop/types/evaluations_dataset_request.py b/src/humanloop/types/evaluations_dataset_request.py
deleted file mode 100644
index a59bb83b..00000000
--- a/src/humanloop/types/evaluations_dataset_request.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EvaluationsDatasetRequest = typing.Optional[typing.Any]
diff --git a/src/humanloop/types/evaluations_request.py b/src/humanloop/types/evaluations_request.py
deleted file mode 100644
index 640f9b5f..00000000
--- a/src/humanloop/types/evaluations_request.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EvaluationsRequest = typing.Optional[typing.Any]
diff --git a/src/humanloop/types/evaluator_activation_deactivation_request.py b/src/humanloop/types/evaluator_activation_deactivation_request.py
deleted file mode 100644
index f9c6023e..00000000
--- a/src/humanloop/types/evaluator_activation_deactivation_request.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .evaluator_activation_deactivation_request_activate_item import EvaluatorActivationDeactivationRequestActivateItem
-from .evaluator_activation_deactivation_request_deactivate_item import (
-    EvaluatorActivationDeactivationRequestDeactivateItem,
-)
-
-
-class EvaluatorActivationDeactivationRequest(UncheckedBaseModel):
-    activate: typing.Optional[typing.List[EvaluatorActivationDeactivationRequestActivateItem]] = pydantic.Field(
-        default=None
-    )
-    """
-    Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-    """
-
-    deactivate: typing.Optional[typing.List[EvaluatorActivationDeactivationRequestDeactivateItem]] = pydantic.Field(
-        default=None
-    )
-    """
-    Evaluators to deactivate. These will not be run on new Logs.
-    """
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py b/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py
deleted file mode 100644
index 6d2039b9..00000000
--- a/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
-from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
-
-EvaluatorActivationDeactivationRequestActivateItem = typing.Union[
-    MonitoringEvaluatorVersionRequest, MonitoringEvaluatorEnvironmentRequest
-]
diff --git a/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py b/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py
deleted file mode 100644
index 6eb65d03..00000000
--- a/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
-from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
-
-EvaluatorActivationDeactivationRequestDeactivateItem = typing.Union[
-    MonitoringEvaluatorVersionRequest, MonitoringEvaluatorEnvironmentRequest
-]
diff --git a/src/humanloop/types/evaluator_aggregate.py b/src/humanloop/types/evaluator_aggregate.py
deleted file mode 100644
index 5c24915a..00000000
--- a/src/humanloop/types/evaluator_aggregate.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.unchecked_base_model import UncheckedBaseModel
-
-
-class EvaluatorAggregate(UncheckedBaseModel):
-    value: float = pydantic.Field()
-    """
-    The aggregated value of the evaluator.
-    """
-
-    evaluator_id: str = pydantic.Field()
-    """
-    ID of the evaluator.
-    """
-
-    evaluator_version_id: str = pydantic.Field()
-    """
-    ID of the evaluator version.
-    """
-
-    created_at: dt.datetime
-    updated_at: dt.datetime
-
-    if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
-    else:
-
-        class Config:
-            frozen = True
-            smart_union = True
-            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/evaluator_arguments_type.py b/src/humanloop/types/evaluator_arguments_type.py
deleted file mode 100644
index 56067b4a..00000000
--- a/src/humanloop/types/evaluator_arguments_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
- -import typing - -EvaluatorArgumentsType = typing.Union[typing.Literal["target_free", "target_required"], typing.Any] diff --git a/src/humanloop/types/evaluator_config_response.py b/src/humanloop/types/evaluator_config_response.py deleted file mode 100644 index 00bd5cd8..00000000 --- a/src/humanloop/types/evaluator_config_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class EvaluatorConfigResponse(UncheckedBaseModel): - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_file_id.py b/src/humanloop/types/evaluator_file_id.py deleted file mode 100644 index 6c3b3141..00000000 --- a/src/humanloop/types/evaluator_file_id.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class EvaluatorFileId(UncheckedBaseModel): - """ - Base model for specifying an Evaluator for an Evaluation. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the File. - """ - - orchestrated: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_file_path.py b/src/humanloop/types/evaluator_file_path.py deleted file mode 100644 index cd967935..00000000 --- a/src/humanloop/types/evaluator_file_path.py +++ /dev/null @@ -1,37 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class EvaluatorFilePath(UncheckedBaseModel): - """ - Base model for specifying an Evaluator for an Evaluation. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - path: str = pydantic.Field() - """ - Path identifying a File. Provide this to specify a File. - """ - - orchestrated: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_judgment_number_limit.py b/src/humanloop/types/evaluator_judgment_number_limit.py deleted file mode 100644 index 289afd4c..00000000 --- a/src/humanloop/types/evaluator_judgment_number_limit.py +++ /dev/null @@ -1,33 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class EvaluatorJudgmentNumberLimit(UncheckedBaseModel): - min: typing.Optional[float] = pydantic.Field(default=None) - """ - The minimum value that can be selected. - """ - - max: typing.Optional[float] = pydantic.Field(default=None) - """ - The maximum value that can be selected. - """ - - step: typing.Optional[float] = pydantic.Field(default=None) - """ - The step size for the number input. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_judgment_option_response.py b/src/humanloop/types/evaluator_judgment_option_response.py deleted file mode 100644 index 3b3a78e3..00000000 --- a/src/humanloop/types/evaluator_judgment_option_response.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .valence import Valence - - -class EvaluatorJudgmentOptionResponse(UncheckedBaseModel): - name: str = pydantic.Field() - """ - The name of the option. - """ - - valence: typing.Optional[Valence] = pydantic.Field(default=None) - """ - Whether this option should be considered positive or negative. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py deleted file mode 100644 index e006e7a2..00000000 --- a/src/humanloop/types/evaluator_log_response.py +++ /dev/null @@ -1,201 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage -from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment -from .log_status import LogStatus - - -class EvaluatorLogResponse(UncheckedBaseModel): - """ - General request for creating a Log - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. 
- """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. - """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier of the evaluated Log. The newly created Log will have this one set as parent. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the LLM. Only populated for LLM Evaluator Logs. 
- """ - - judgment: typing.Optional[EvaluatorLogResponseJudgment] = pydantic.Field(default=None) - """ - Evaluator assessment of the Log. - """ - - marked_completed: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the Log has been manually marked as completed by a user. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. - """ - - evaluator: "EvaluatorResponse" = pydantic.Field() - """ - Evaluator used to generate the judgment. - """ - - parent: typing.Optional["LogResponse"] = pydantic.Field(default=None) - """ - The Log that was evaluated. Only provided if the ?include_parent query parameter is set for the - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(EvaluatorLogResponse) diff --git a/src/humanloop/types/evaluator_log_response_judgment.py b/src/humanloop/types/evaluator_log_response_judgment.py deleted file mode 100644 index fd0bbedd..00000000 --- a/src/humanloop/types/evaluator_log_response_judgment.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -EvaluatorLogResponseJudgment = typing.Union[bool, str, typing.List[str], float] diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py deleted file mode 100644 index 0af149d3..00000000 --- a/src/humanloop/types/evaluator_response.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -import typing_extensions -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.serialization import FieldMetadata -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_response import EnvironmentResponse -from .evaluator_aggregate import EvaluatorAggregate -from .evaluator_response_spec import EvaluatorResponseSpec -from .input_response import InputResponse -from .user_response import UserResponse - - -class EvaluatorResponse(UncheckedBaseModel): - """ - Version of the Evaluator used to provide judgments. - """ - - path: str = pydantic.Field() - """ - Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Evaluator. - """ - - directory_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the directory that the file is in on Humanloop. - """ - - version_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique name for the Evaluator version. Version names must be unique for a given Evaluator. - """ - - version_description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the version, e.g., the changes made in this version. - """ - - spec: EvaluatorResponseSpec - name: str = pydantic.Field() - """ - Name of the Evaluator, which is used as a unique identifier. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Evaluator. - """ - - schema_: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") - ] = pydantic.Field(default=None) - """ - The JSON schema for the File. - """ - - readme: typing.Optional[str] = pydantic.Field(default=None) - """ - Long description of the file. - """ - - tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - List of tags associated with the file. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the specific Evaluator Version. If no query params provided, the default deployed Evaluator Version is returned. - """ - - type: typing.Optional[typing.Literal["evaluator"]] = None - environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) - """ - The list of environments the Evaluator Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who created the Evaluator. - """ - - last_used_at: dt.datetime - version_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated for this Evaluator Version - """ - - total_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated across all Evaluator Versions - """ - - inputs: typing.List[InputResponse] = pydantic.Field() - """ - Inputs associated to the Evaluator. Inputs correspond to any of the variables used within the Evaluator template. - """ - - evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) - """ - Evaluators that have been attached to this Evaluator that are used for monitoring logs. - """ - - evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) - """ - Aggregation of Evaluator results for the Evaluator Version. 
- """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(EvaluatorResponse) diff --git a/src/humanloop/types/evaluator_response_spec.py b/src/humanloop/types/evaluator_response_spec.py deleted file mode 100644 index 45eb1790..00000000 --- a/src/humanloop/types/evaluator_response_spec.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .code_evaluator_request import CodeEvaluatorRequest -from .external_evaluator_request import ExternalEvaluatorRequest -from .human_evaluator_request import HumanEvaluatorRequest -from .llm_evaluator_request import LlmEvaluatorRequest - -EvaluatorResponseSpec = typing.Union[ - LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest -] diff --git a/src/humanloop/types/evaluator_return_type_enum.py b/src/humanloop/types/evaluator_return_type_enum.py deleted file mode 100644 index 052a51d2..00000000 --- a/src/humanloop/types/evaluator_return_type_enum.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -EvaluatorReturnTypeEnum = typing.Union[ - typing.Literal["boolean", "number", "select", "multi_select", "text"], typing.Any -] diff --git a/src/humanloop/types/evaluator_version_id.py b/src/humanloop/types/evaluator_version_id.py deleted file mode 100644 index 688acf9a..00000000 --- a/src/humanloop/types/evaluator_version_id.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class EvaluatorVersionId(UncheckedBaseModel): - """ - Base model for specifying an Evaluator for an Evaluation. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the Version. - """ - - orchestrated: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluators_request.py b/src/humanloop/types/evaluators_request.py deleted file mode 100644 index 6c8cef34..00000000 --- a/src/humanloop/types/evaluators_request.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -EvaluatorsRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py deleted file mode 100644 index 128eed92..00000000 --- a/src/humanloop/types/event_type.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -EventType = typing.Union[ - typing.Literal[ - "agent_turn_start", - "agent_turn_suspend", - "agent_turn_continue", - "agent_turn_end", - "agent_start", - "agent_update", - "agent_end", - "tool_start", - "tool_update", - "tool_end", - "error", - "agent_generation_error", - ], - typing.Any, -] diff --git a/src/humanloop/types/external_evaluator_request.py b/src/humanloop/types/external_evaluator_request.py deleted file mode 100644 index 9f528f67..00000000 --- a/src/humanloop/types/external_evaluator_request.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .evaluator_arguments_type import EvaluatorArgumentsType -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse -from .evaluator_return_type_enum import EvaluatorReturnTypeEnum -from .valence import Valence - - -class ExternalEvaluatorRequest(UncheckedBaseModel): - arguments_type: EvaluatorArgumentsType = pydantic.Field() - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: EvaluatorReturnTypeEnum = pydantic.Field() - """ - The type of the return value of the Evaluator. - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) - """ - The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. - """ - - number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing.Optional[Valence] = pydantic.Field(default=None) - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. 
- """ - - evaluator_type: typing.Literal["external"] = "external" - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/feedback_type.py b/src/humanloop/types/feedback_type.py deleted file mode 100644 index 5a964f16..00000000 --- a/src/humanloop/types/feedback_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -FeedbackType = typing.Optional[typing.Any] diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py deleted file mode 100644 index f3a26ef0..00000000 --- a/src/humanloop/types/file_environment_response.py +++ /dev/null @@ -1,51 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_tag import EnvironmentTag -from .file_environment_response_file import FileEnvironmentResponseFile - - -class FileEnvironmentResponse(UncheckedBaseModel): - """ - Response model for the List Environments endpoint under Files. - - Contains the deployed version of the File, if one is deployed to the Environment. - """ - - id: str - created_at: dt.datetime - name: str - tag: EnvironmentTag - file: typing.Optional[FileEnvironmentResponseFile] = pydantic.Field(default=None) - """ - The version of the File that is deployed to the Environment, if one is deployed. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(FileEnvironmentResponse) diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py deleted file mode 100644 index 2725177a..00000000 --- a/src/humanloop/types/file_environment_response_file.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .agent_response import AgentResponse -from .dataset_response import DatasetResponse -from .evaluator_response import EvaluatorResponse -from .flow_response import FlowResponse -from .prompt_response import PromptResponse -from .tool_response import ToolResponse - -FileEnvironmentResponseFile = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse -] diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py deleted file mode 100644 index 112e9602..00000000 --- a/src/humanloop/types/file_environment_variable_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FileEnvironmentVariableRequest(UncheckedBaseModel): - name: str = pydantic.Field() - """ - Name of the environment variable. - """ - - value: str = pydantic.Field() - """ - Value of the environment variable. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_id.py b/src/humanloop/types/file_id.py deleted file mode 100644 index fe049681..00000000 --- a/src/humanloop/types/file_id.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FileId(UncheckedBaseModel): - """ - Specification of a File by its ID. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the File. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_path.py b/src/humanloop/types/file_path.py deleted file mode 100644 index 3f4f7591..00000000 --- a/src/humanloop/types/file_path.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FilePath(UncheckedBaseModel): - """ - Specification of a File by its path. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. - """ - - path: str = pydantic.Field() - """ - Path identifying a File. Provide this to specify a File. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_request.py b/src/humanloop/types/file_request.py deleted file mode 100644 index ba9518e2..00000000 --- a/src/humanloop/types/file_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FileRequest(UncheckedBaseModel): - id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID for an existing File. - """ - - path: typing.Optional[str] = pydantic.Field(default=None) - """ - Path of the File, including the name. This locates the File in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_sort_by.py b/src/humanloop/types/file_sort_by.py deleted file mode 100644 index b3135c3b..00000000 --- a/src/humanloop/types/file_sort_by.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -FileSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any] diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py deleted file mode 100644 index f235825b..00000000 --- a/src/humanloop/types/file_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any] diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py deleted file mode 100644 index 753d9ba2..00000000 --- a/src/humanloop/types/files_tool_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -FilesToolType = typing.Union[ - typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any -] diff --git a/src/humanloop/types/flow_kernel_request.py b/src/humanloop/types/flow_kernel_request.py deleted file mode 100644 index 9b9adec9..00000000 --- a/src/humanloop/types/flow_kernel_request.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FlowKernelRequest(UncheckedBaseModel): - attributes: typing.Dict[str, typing.Optional[typing.Any]] = pydantic.Field() - """ - A key-value object identifying the Flow Version. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py deleted file mode 100644 index 188c1fdf..00000000 --- a/src/humanloop/types/flow_log_response.py +++ /dev/null @@ -1,185 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage -from .log_status import LogStatus - - -class FlowLogResponse(UncheckedBaseModel): - """ - General request for creating a Log - """ - - messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) - """ - List of chat messages that were used as an input to the Flow. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The output message returned by this Flow. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. - """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the Flow Log. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. 
- """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. - """ - - flow: "FlowResponse" = pydantic.Field() - """ - Flow used to generate the Log. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(FlowLogResponse) diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py deleted file mode 100644 index 826b9238..00000000 --- a/src/humanloop/types/flow_response.py +++ /dev/null @@ -1,133 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -import typing_extensions -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.serialization import FieldMetadata -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_response import EnvironmentResponse -from .evaluator_aggregate import EvaluatorAggregate -from .user_response import UserResponse - - -class FlowResponse(UncheckedBaseModel): - """ - Response model for a Flow. - """ - - path: str = pydantic.Field() - """ - Path of the Flow, including the name, which is used as a unique identifier. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Flow. - """ - - directory_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the directory that the file is in on Humanloop. - """ - - attributes: typing.Dict[str, typing.Optional[typing.Any]] = pydantic.Field() - """ - A key-value object identifying the Flow Version. - """ - - version_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique name for the Flow version. Version names must be unique for a given Flow. - """ - - version_description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Version. - """ - - name: str = pydantic.Field() - """ - Name of the Flow. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Flow. - """ - - schema_: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") - ] = pydantic.Field(default=None) - """ - The JSON schema for the File. - """ - - readme: typing.Optional[str] = pydantic.Field(default=None) - """ - Long description of the file. 
- """ - - tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - List of tags associated with the file. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the specific Flow Version. If no query params provided, the default deployed Flow Version is returned. - """ - - type: typing.Optional[typing.Literal["flow"]] = None - environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) - """ - The list of environments the Flow Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who created the Flow. - """ - - last_used_at: dt.datetime - version_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated for this Flow Version - """ - - evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) - """ - Aggregation of Evaluator results for the Flow Version. - """ - - evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) - """ - The list of Monitoring Evaluators associated with the Flow Version. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(FlowResponse) diff --git a/src/humanloop/types/function_tool.py b/src/humanloop/types/function_tool.py deleted file mode 100644 index faef2899..00000000 --- a/src/humanloop/types/function_tool.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FunctionTool(UncheckedBaseModel): - """ - A function tool to be called by the model where user owns runtime. - """ - - name: str - arguments: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/function_tool_choice.py b/src/humanloop/types/function_tool_choice.py deleted file mode 100644 index 43d0eeb6..00000000 --- a/src/humanloop/types/function_tool_choice.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class FunctionToolChoice(UncheckedBaseModel): - """ - A function tool to be called by the model where user owns runtime. - """ - - name: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/http_validation_error.py b/src/humanloop/types/http_validation_error.py deleted file mode 100644 index 188935a0..00000000 --- a/src/humanloop/types/http_validation_error.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .validation_error import ValidationError - - -class HttpValidationError(UncheckedBaseModel): - detail: typing.Optional[typing.List[ValidationError]] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/human_evaluator_request.py b/src/humanloop/types/human_evaluator_request.py deleted file mode 100644 index ef604880..00000000 --- a/src/humanloop/types/human_evaluator_request.py +++ /dev/null @@ -1,59 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .evaluator_arguments_type import EvaluatorArgumentsType -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse -from .human_evaluator_request_return_type import HumanEvaluatorRequestReturnType -from .valence import Valence - - -class HumanEvaluatorRequest(UncheckedBaseModel): - arguments_type: EvaluatorArgumentsType = pydantic.Field() - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: HumanEvaluatorRequestReturnType = pydantic.Field() - """ - The type of the return value of the Evaluator. - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) - """ - The options that can be applied as judgments. - """ - - number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing.Optional[Valence] = pydantic.Field(default=None) - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. 
- """ - - evaluator_type: typing.Literal["human"] = "human" - instructions: typing.Optional[str] = pydantic.Field(default=None) - """ - Instructions and guidelines for applying judgments. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/human_evaluator_request_return_type.py b/src/humanloop/types/human_evaluator_request_return_type.py deleted file mode 100644 index c6ae135e..00000000 --- a/src/humanloop/types/human_evaluator_request_return_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -HumanEvaluatorRequestReturnType = typing.Union[ - typing.Literal["select", "multi_select", "text", "number", "boolean"], typing.Any -] diff --git a/src/humanloop/types/image_chat_content.py b/src/humanloop/types/image_chat_content.py deleted file mode 100644 index 9e12716d..00000000 --- a/src/humanloop/types/image_chat_content.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .image_url import ImageUrl - - -class ImageChatContent(UncheckedBaseModel): - type: typing.Literal["image_url"] = "image_url" - image_url: ImageUrl = pydantic.Field() - """ - The message's image content. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/image_url.py b/src/humanloop/types/image_url.py deleted file mode 100644 index ed170dea..00000000 --- a/src/humanloop/types/image_url.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .image_url_detail import ImageUrlDetail - - -class ImageUrl(UncheckedBaseModel): - url: str = pydantic.Field() - """ - Either a URL of the image or the base64 encoded image data. - """ - - detail: typing.Optional[ImageUrlDetail] = pydantic.Field(default=None) - """ - Specify the detail level of the image provided to the model. For more details see: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/image_url_detail.py b/src/humanloop/types/image_url_detail.py deleted file mode 100644 index 43c7a47d..00000000 --- a/src/humanloop/types/image_url_detail.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ImageUrlDetail = typing.Union[typing.Literal["high", "low", "auto"], typing.Any] diff --git a/src/humanloop/types/input_response.py b/src/humanloop/types/input_response.py deleted file mode 100644 index 36cfa6ed..00000000 --- a/src/humanloop/types/input_response.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class InputResponse(UncheckedBaseModel): - name: str = pydantic.Field() - """ - Type of input. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py deleted file mode 100644 index 7ce2bc95..00000000 --- a/src/humanloop/types/linked_file_request.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class LinkedFileRequest(UncheckedBaseModel): - file_id: str - environment_id: typing.Optional[str] = None - version_id: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/linked_tool_response.py b/src/humanloop/types/linked_tool_response.py deleted file mode 100644 index 95bc2492..00000000 --- a/src/humanloop/types/linked_tool_response.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class LinkedToolResponse(UncheckedBaseModel): - name: str = pydantic.Field() - """ - Name for the tool referenced by the model. - """ - - description: str = pydantic.Field() - """ - Description of the tool referenced by the model - """ - - strict: typing.Optional[bool] = pydantic.Field(default=None) - """ - If true, forces the model to output json data in the structure of the parameters schema. - """ - - parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Tool linked. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the Tool Version linked. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py deleted file mode 100644 index 526d9b9b..00000000 --- a/src/humanloop/types/list_agents.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ListAgents(UncheckedBaseModel): - records: typing.List["AgentResponse"] = pydantic.Field() - """ - The list of Agents. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(ListAgents) diff --git a/src/humanloop/types/list_datasets.py b/src/humanloop/types/list_datasets.py deleted file mode 100644 index 3e4de370..00000000 --- a/src/humanloop/types/list_datasets.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .dataset_response import DatasetResponse - - -class ListDatasets(UncheckedBaseModel): - records: typing.List[DatasetResponse] = pydantic.Field() - """ - The list of Datasets. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py deleted file mode 100644 index 4ec412cb..00000000 --- a/src/humanloop/types/list_evaluators.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ListEvaluators(UncheckedBaseModel): - records: typing.List["EvaluatorResponse"] = pydantic.Field() - """ - The list of Evaluators. 
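The `records: typing.List["AgentResponse"]` string annotation, the imports placed after the class body, and the closing `update_forward_refs(...)` call are the standard Pydantic recipe for breaking the circular imports between these response modules. A self-contained sketch of the same recipe (model names hypothetical; the SDK wraps the final call in a version-aware helper that uses `model_rebuild()` on Pydantic v2):

    import typing

    import pydantic


    class ListThings(pydantic.BaseModel):
        # String annotation: the target type cannot be imported here without
        # creating an import cycle, so the reference stays unresolved for now.
        records: typing.List["ThingResponse"]


    class ThingResponse(pydantic.BaseModel):  # stands in for the late import
        id: str


    # Resolve the string annotation now that the target type exists.
    ListThings.update_forward_refs(ThingResponse=ThingResponse)  # v1 spelling

    page = ListThings(records=[ThingResponse(id="ag_123")])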
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(ListEvaluators) diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py deleted file mode 100644 index ce407328..00000000 --- a/src/humanloop/types/list_flows.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ListFlows(UncheckedBaseModel): - records: typing.List["FlowResponse"] = pydantic.Field() - """ - The list of Flows. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(ListFlows) diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py deleted file mode 100644 index 42d01cf0..00000000 --- a/src/humanloop/types/list_prompts.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ListPrompts(UncheckedBaseModel): - records: typing.List["PromptResponse"] = pydantic.Field() - """ - The list of Prompts. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(ListPrompts) diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py deleted file mode 100644 index 7b2e7c70..00000000 --- a/src/humanloop/types/list_tools.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ListTools(UncheckedBaseModel): - records: typing.List["ToolResponse"] = pydantic.Field() - """ - The list of Tools. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(ListTools) diff --git a/src/humanloop/types/llm_evaluator_request.py b/src/humanloop/types/llm_evaluator_request.py deleted file mode 100644 index c2061bfa..00000000 --- a/src/humanloop/types/llm_evaluator_request.py +++ /dev/null @@ -1,60 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .evaluator_arguments_type import EvaluatorArgumentsType -from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit -from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse -from .evaluator_return_type_enum import EvaluatorReturnTypeEnum -from .prompt_kernel_request import PromptKernelRequest -from .valence import Valence - - -class LlmEvaluatorRequest(UncheckedBaseModel): - arguments_type: EvaluatorArgumentsType = pydantic.Field() - """ - Whether this Evaluator is target-free or target-required. - """ - - return_type: EvaluatorReturnTypeEnum = pydantic.Field() - """ - The type of the return value of the Evaluator. - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. - """ - - options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) - """ - The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. - """ - - number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) - """ - Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. - """ - - number_valence: typing.Optional[Valence] = pydantic.Field(default=None) - """ - The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. - """ - - evaluator_type: typing.Literal["llm"] = "llm" - prompt: typing.Optional[PromptKernelRequest] = pydantic.Field(default=None) - """ - The prompt parameters used to generate. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py deleted file mode 100644 index e6f60fcb..00000000 --- a/src/humanloop/types/log_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -if typing.TYPE_CHECKING: - from .agent_log_response import AgentLogResponse - from .evaluator_log_response import EvaluatorLogResponse - from .flow_log_response import FlowLogResponse - from .prompt_log_response import PromptLogResponse - from .tool_log_response import ToolLogResponse -LogResponse = typing.Union[ - "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse" -] diff --git a/src/humanloop/types/log_status.py b/src/humanloop/types/log_status.py deleted file mode 100644 index a09d61c0..00000000 --- a/src/humanloop/types/log_status.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
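`LlmEvaluatorRequest` ties several of these fields together: `options` only applies to the boolean/select/multi_select return types, while `number_limits` and `number_valence` only apply to numeric ones. A construction sketch, with the enum spellings inferred from the field docstrings (treat them as assumptions) and the model/template values illustrative:

    from humanloop.types import LlmEvaluatorRequest, PromptKernelRequest  # pre-removal paths

    evaluator = LlmEvaluatorRequest(
        arguments_type="target_free",   # assumed spelling of "target-free"
        return_type="number",           # numeric judgments...
        number_valence="positive",      # ...where a higher score is better
        prompt=PromptKernelRequest(
            model="gpt-4o",
            template="Rate this answer from 0 to 10:\n{{answer}}",
        ),
    )
    assert evaluator.evaluator_type == "llm"  # fixed Literal discriminator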
- -import typing - -LogStatus = typing.Union[typing.Literal["complete", "incomplete"], typing.Any] diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py deleted file mode 100644 index 2687e2ea..00000000 --- a/src/humanloop/types/log_stream_response.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_log_stream_response import AgentLogStreamResponse -from .prompt_call_stream_response import PromptCallStreamResponse - -LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse] diff --git a/src/humanloop/types/model_endpoints.py b/src/humanloop/types/model_endpoints.py deleted file mode 100644 index befb9936..00000000 --- a/src/humanloop/types/model_endpoints.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ModelEndpoints = typing.Union[typing.Literal["complete", "chat", "edit"], typing.Any] diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py deleted file mode 100644 index 3f2c99fb..00000000 --- a/src/humanloop/types/model_providers.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ModelProviders = typing.Union[ - typing.Literal[ - "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate" - ], - typing.Any, -] diff --git a/src/humanloop/types/monitoring_evaluator_environment_request.py b/src/humanloop/types/monitoring_evaluator_environment_request.py deleted file mode 100644 index cd3b8491..00000000 --- a/src/humanloop/types/monitoring_evaluator_environment_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class MonitoringEvaluatorEnvironmentRequest(UncheckedBaseModel): - evaluator_id: str = pydantic.Field() - """ - Unique identifier for the Evaluator to be used for monitoring. - """ - - environment_id: str = pydantic.Field() - """ - Unique identifier for the Environment. The Evaluator Version deployed to this Environment will be used for monitoring. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py deleted file mode 100644 index 1c08f955..00000000 --- a/src/humanloop/types/monitoring_evaluator_response.py +++ /dev/null @@ -1,53 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .monitoring_evaluator_state import MonitoringEvaluatorState - - -class MonitoringEvaluatorResponse(UncheckedBaseModel): - version_reference: "VersionReferenceResponse" = pydantic.Field() - """ - The Evaluator Version used for monitoring. This can be a specific Version by ID, or a Version deployed to an Environment. 
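Note the shape shared by `LogStatus`, `ModelEndpoints`, and `ModelProviders`: each is `Union[Literal[...], Any]` rather than a closed `Literal`. The `Any` arm keeps deserialization from failing when the server introduces a value this client version does not know about, at the cost of weaker static checking. A sketch of the consequence for calling code:

    import typing

    # Mirrors the deleted alias: known values plus an escape hatch.
    LogStatus = typing.Union[typing.Literal["complete", "incomplete"], typing.Any]


    def is_final(status: LogStatus) -> bool:
        # Compare against the literals you know; treat anything newer or
        # unexpected as not-final rather than raising.
        return status == "complete"


    assert is_final("complete")
    assert not is_final("some_future_status")  # accepted, handled conservatively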
- """ - - version: typing.Optional["EvaluatorResponse"] = pydantic.Field(default=None) - """ - The deployed Version. - """ - - state: MonitoringEvaluatorState = pydantic.Field() - """ - The state of the Monitoring Evaluator. Either `active` or `inactive` - """ - - created_at: dt.datetime - updated_at: dt.datetime - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .version_reference_response import VersionReferenceResponse # noqa: E402, F401, I001 - -update_forward_refs(MonitoringEvaluatorResponse) diff --git a/src/humanloop/types/monitoring_evaluator_state.py b/src/humanloop/types/monitoring_evaluator_state.py deleted file mode 100644 index 550c0fdb..00000000 --- a/src/humanloop/types/monitoring_evaluator_state.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -MonitoringEvaluatorState = typing.Union[typing.Literal["active", "inactive"], typing.Any] diff --git a/src/humanloop/types/monitoring_evaluator_version_request.py b/src/humanloop/types/monitoring_evaluator_version_request.py deleted file mode 100644 index 8adfb290..00000000 --- a/src/humanloop/types/monitoring_evaluator_version_request.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class MonitoringEvaluatorVersionRequest(UncheckedBaseModel): - evaluator_version_id: str = pydantic.Field() - """ - Unique identifier for the Evaluator Version to be used for monitoring. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/numeric_evaluator_stats_response.py b/src/humanloop/types/numeric_evaluator_stats_response.py deleted file mode 100644 index 6bed0547..00000000 --- a/src/humanloop/types/numeric_evaluator_stats_response.py +++ /dev/null @@ -1,53 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class NumericEvaluatorStatsResponse(UncheckedBaseModel): - """ - Base attributes for stats for an Evaluator Version-Evaluated Version pair - in the Evaluation. - """ - - evaluator_version_id: str = pydantic.Field() - """ - Unique identifier for the Evaluator Version. 
- """ - - total_logs: int = pydantic.Field() - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int = pydantic.Field() - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. - """ - - num_nulls: int = pydantic.Field() - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. - """ - - num_errors: int = pydantic.Field() - """ - The total number of errored Evaluators for this Evaluator Version. - """ - - mean: typing.Optional[float] = None - sum: typing.Optional[float] = None - std: typing.Optional[float] = None - percentiles: typing.Dict[str, float] - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/observability_status.py b/src/humanloop/types/observability_status.py deleted file mode 100644 index a6b6ad71..00000000 --- a/src/humanloop/types/observability_status.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ObservabilityStatus = typing.Union[typing.Literal["pending", "running", "completed", "failed"], typing.Any] diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py deleted file mode 100644 index 3730256e..00000000 --- a/src/humanloop/types/on_agent_call_enum.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any] diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py deleted file mode 100644 index d8c48547..00000000 --- a/src/humanloop/types/open_ai_reasoning_effort.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any] diff --git a/src/humanloop/types/overall_stats.py b/src/humanloop/types/overall_stats.py deleted file mode 100644 index d00145b9..00000000 --- a/src/humanloop/types/overall_stats.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class OverallStats(UncheckedBaseModel): - num_datapoints: int = pydantic.Field() - """ - The total number of Datapoints in the Evaluation's Dataset Version. - """ - - total_logs: int = pydantic.Field() - """ - The total number of Logs in the Evaluation. - """ - - total_evaluator_logs: int = pydantic.Field() - """ - The total number of Evaluator Logs in the Evaluation. - """ - - total_human_evaluator_logs: int = pydantic.Field() - """ - The total number of human Evaluator Logs in the Evaluation Report. - """ - - total_completed_human_evaluator_logs: int = pydantic.Field() - """ - The total number of non-None human Evaluator Logs in the Evaluation Report. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py deleted file mode 100644 index ecc67072..00000000 --- a/src/humanloop/types/paginated_data_agent_response.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class PaginatedDataAgentResponse(UncheckedBaseModel): - records: typing.List["AgentResponse"] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataAgentResponse) diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py deleted file mode 100644 index 17baff0c..00000000 --- a/src/humanloop/types/paginated_data_evaluation_log_response.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .evaluation_log_response import EvaluationLogResponse - - -class PaginatedDataEvaluationLogResponse(UncheckedBaseModel): - records: typing.List[EvaluationLogResponse] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataEvaluationLogResponse) diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py deleted file mode 100644 index 47a835e6..00000000 --- a/src/humanloop/types/paginated_data_evaluator_response.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class PaginatedDataEvaluatorResponse(UncheckedBaseModel): - records: typing.List["EvaluatorResponse"] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataEvaluatorResponse) diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py deleted file mode 100644 index 2775ec74..00000000 --- a/src/humanloop/types/paginated_data_flow_response.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class PaginatedDataFlowResponse(UncheckedBaseModel): - records: typing.List["FlowResponse"] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataFlowResponse) diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py deleted file mode 100644 index 1354146d..00000000 --- a/src/humanloop/types/paginated_data_log_response.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class PaginatedDataLogResponse(UncheckedBaseModel): - records: typing.List["LogResponse"] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataLogResponse) diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py deleted file mode 100644 index 4487f88f..00000000 --- a/src/humanloop/types/paginated_data_prompt_response.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class PaginatedDataPromptResponse(UncheckedBaseModel): - records: typing.List["PromptResponse"] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataPromptResponse) diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py deleted file mode 100644 index 131ddb69..00000000 --- a/src/humanloop/types/paginated_data_tool_response.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class PaginatedDataToolResponse(UncheckedBaseModel): - records: typing.List["ToolResponse"] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedDataToolResponse) diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py deleted file mode 100644 index f6ee4be8..00000000 --- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py +++ /dev/null @@ -1,47 +0,0 @@ -# This 
file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, -) - - -class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse( - UncheckedBaseModel -): - records: typing.List[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem - ] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse -) diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py deleted file mode 100644 index ee28a684..00000000 --- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_response import AgentResponse -from .dataset_response import DatasetResponse -from .evaluator_response import EvaluatorResponse -from .flow_response import FlowResponse -from .prompt_response import PromptResponse -from .tool_response import ToolResponse - -PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = ( - typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse] -) diff --git a/src/humanloop/types/paginated_datapoint_response.py b/src/humanloop/types/paginated_datapoint_response.py deleted file mode 100644 index c82aa987..00000000 --- a/src/humanloop/types/paginated_datapoint_response.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .datapoint_response import DatapointResponse - - -class PaginatedDatapointResponse(UncheckedBaseModel): - records: typing.List[DatapointResponse] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_dataset_response.py b/src/humanloop/types/paginated_dataset_response.py deleted file mode 100644 index 689c7276..00000000 --- a/src/humanloop/types/paginated_dataset_response.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .dataset_response import DatasetResponse - - -class PaginatedDatasetResponse(UncheckedBaseModel): - records: typing.List[DatasetResponse] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py deleted file mode 100644 index b9efe745..00000000 --- a/src/humanloop/types/paginated_evaluation_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .evaluation_response import EvaluationResponse - - -class PaginatedEvaluationResponse(UncheckedBaseModel): - records: typing.List[EvaluationResponse] - page: int - size: int - total: int - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PaginatedEvaluationResponse) diff --git a/src/humanloop/types/paginated_prompt_log_response.py b/src/humanloop/types/paginated_prompt_log_response.py deleted file mode 100644 index 50dd56aa..00000000 --- a/src/humanloop/types/paginated_prompt_log_response.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
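Every `Paginated*` model in this patch shares the same envelope of `records`, `page`, `size`, and `total`, so the page count is derivable rather than returned. A small helper sketch:

    import math


    def num_pages(total: int, size: int) -> int:
        # Derive the page count from the shared pagination envelope.
        return math.ceil(total / size) if size > 0 else 0


    # e.g. a PaginatedDatasetResponse with total=53 and size=20 spans 3 pages,
    # with 13 records on the final page.
    assert num_pages(total=53, size=20) == 3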
- -import typing - -PaginatedPromptLogResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/paginated_session_response.py b/src/humanloop/types/paginated_session_response.py deleted file mode 100644 index aa0bfbca..00000000 --- a/src/humanloop/types/paginated_session_response.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PaginatedSessionResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/platform_access_enum.py b/src/humanloop/types/platform_access_enum.py deleted file mode 100644 index 56da571b..00000000 --- a/src/humanloop/types/platform_access_enum.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PlatformAccessEnum = typing.Union[typing.Literal["superadmin", "supportadmin", "user"], typing.Any] diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py deleted file mode 100644 index b27a3a90..00000000 --- a/src/humanloop/types/populate_template_response.py +++ /dev/null @@ -1,258 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -import typing_extensions -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.serialization import FieldMetadata -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_response import EnvironmentResponse -from .evaluator_aggregate import EvaluatorAggregate -from .input_response import InputResponse -from .linked_tool_response import LinkedToolResponse -from .model_endpoints import ModelEndpoints -from .model_providers import ModelProviders -from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate -from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort -from .populate_template_response_stop import PopulateTemplateResponseStop -from .populate_template_response_template import PopulateTemplateResponseTemplate -from .response_format import ResponseFormat -from .template_language import TemplateLanguage -from .tool_function import ToolFunction -from .user_response import UserResponse - - -class PopulateTemplateResponse(UncheckedBaseModel): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str = pydantic.Field() - """ - Path of the Prompt, including the name, which is used as a unique identifier. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Prompt. - """ - - directory_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the directory that the file is in on Humanloop. - """ - - model: str = pydantic.Field() - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) - """ - The provider model endpoint used. - """ - - template: typing.Optional[PopulateTemplateResponseTemplate] = pydantic.Field(default=None) - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. 
- - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) - """ - The template language to use for rendering the template. - """ - - provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) - """ - The company providing the underlying model service. - """ - - max_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. - """ - - temperature: typing.Optional[float] = pydantic.Field(default=None) - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - """ - - top_p: typing.Optional[float] = pydantic.Field(default=None) - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing.Optional[PopulateTemplateResponseStop] = pydantic.Field(default=None) - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing.Optional[int] = pydantic.Field(default=None) - """ - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None) - """ - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) - """ - The tool specification that the model can choose to call if Tool calling is supported. - """ - - linked_tools: typing.Optional[typing.List[LinkedToolResponse]] = pydantic.Field(default=None) - """ - The tools linked to your prompt that the model can call. - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
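The `reasoning_effort` docstring above encodes a cross-provider union: OpenAI reasoning models take a coarse effort level, Anthropic ones take an integer token budget. Callers therefore have to branch on the runtime type, as in this sketch:

    import typing

    OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
    ReasoningEffort = typing.Union[OpenAiReasoningEffort, int]  # mirrors the deleted alias


    def describe(effort: ReasoningEffort) -> str:
        if isinstance(effort, int):
            # Anthropic-style: an explicit maximum thinking-token budget.
            return f"token budget of {effort}"
        # OpenAI-style (o1, o3-mini): a coarse effort dial.
        return f"effort level {effort!r}"


    assert describe(2048) == "token budget of 2048"
    assert describe("medium") == "effort level 'medium'"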
- """ - - version_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique name for the Prompt version. Version names must be unique for a given Prompt. - """ - - version_description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the version, e.g., the changes made in this version. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Prompt. - """ - - tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - List of tags associated with the file. - """ - - readme: typing.Optional[str] = pydantic.Field(default=None) - """ - Long description of the file. - """ - - name: str = pydantic.Field() - """ - Name of the Prompt. - """ - - schema_: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") - ] = pydantic.Field(default=None) - """ - The JSON schema for the Prompt. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. - """ - - type: typing.Optional[typing.Literal["prompt"]] = None - environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) - """ - The list of environments the Prompt Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who created the Prompt. - """ - - last_used_at: dt.datetime - version_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated for this Prompt Version - """ - - total_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated across all Prompt Versions - """ - - inputs: typing.List[InputResponse] = pydantic.Field() - """ - Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template. - """ - - evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) - """ - Evaluators that have been attached to this Prompt that are used for monitoring logs. - """ - - evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) - """ - Aggregation of Evaluator results for the Prompt Version. - """ - - raw_file_content: typing.Optional[str] = pydantic.Field(default=None) - """ - The raw content of the Prompt. Corresponds to the .prompt file. - """ - - populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None) - """ - The template populated with the input values you provided in the request. Returns None if no template exists. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PopulateTemplateResponse) diff --git a/src/humanloop/types/populate_template_response_populated_template.py b/src/humanloop/types/populate_template_response_populated_template.py deleted file mode 100644 index 21a714b9..00000000 --- a/src/humanloop/types/populate_template_response_populated_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessage - -PopulateTemplateResponsePopulatedTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py deleted file mode 100644 index af02db55..00000000 --- a/src/humanloop/types/populate_template_response_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .open_ai_reasoning_effort import OpenAiReasoningEffort - -PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/populate_template_response_stop.py b/src/humanloop/types/populate_template_response_stop.py deleted file mode 100644 index 5613c780..00000000 --- a/src/humanloop/types/populate_template_response_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PopulateTemplateResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/populate_template_response_template.py b/src/humanloop/types/populate_template_response_template.py deleted file mode 100644 index d3b10e2e..00000000 --- a/src/humanloop/types/populate_template_response_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessage - -PopulateTemplateResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/prompt_call_log_response.py b/src/humanloop/types/prompt_call_log_response.py deleted file mode 100644 index 2ec71bf1..00000000 --- a/src/humanloop/types/prompt_call_log_response.py +++ /dev/null @@ -1,89 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage - - -class PromptCallLogResponse(UncheckedBaseModel): - """ - Sample specific response details for a Prompt call - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - index: int = pydantic.Field() - """ - The index of the sample in the batch. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py deleted file mode 100644 index f20ce5f6..00000000 --- a/src/humanloop/types/prompt_call_response.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage -from .log_status import LogStatus -from .prompt_call_log_response import PromptCallLogResponse -from .prompt_call_response_tool_choice import PromptCallResponseToolChoice - - -class PromptCallResponse(UncheckedBaseModel): - """ - Response model for a Prompt call with potentially multiple log samples. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. 
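Since `PromptCallLogResponse` carries per-sample token and dollar figures, the total spend for a call is just a fold over `PromptCallResponse.logs`. A sketch with a dataclass standing in for the relevant fields:

    import dataclasses
    import typing


    @dataclasses.dataclass
    class SampleCost:  # stand-in for the cost fields on PromptCallLogResponse
        prompt_cost: typing.Optional[float]
        output_cost: typing.Optional[float]


    def total_cost(samples: typing.Iterable[SampleCost]) -> float:
        # Treat missing costs (None) as zero instead of poisoning the sum.
        return sum((s.prompt_cost or 0.0) + (s.output_cost or 0.0) for s in samples)


    assert round(total_cost([SampleCost(0.002, 0.004), SampleCost(None, 0.001)]), 6) == 0.007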
- """ - - messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) - """ - The messages passed to the to provider chat endpoint. - """ - - tool_choice: typing.Optional[PromptCallResponseToolChoice] = pydantic.Field(default=None) - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools. - - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. - """ - - prompt: "PromptResponse" = pydantic.Field() - """ - Prompt used to generate the Log. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - ID of the log. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the Trace containing the Prompt Call Log. - """ - - logs: typing.List[PromptCallLogResponse] = pydantic.Field() - """ - The logs generated by the Prompt call. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PromptCallResponse) diff --git a/src/humanloop/types/prompt_call_response_tool_choice.py b/src/humanloop/types/prompt_call_response_tool_choice.py deleted file mode 100644 index 7cb07ccc..00000000 --- a/src/humanloop/types/prompt_call_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoice - -PromptCallResponseToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/types/prompt_call_stream_response.py b/src/humanloop/types/prompt_call_stream_response.py deleted file mode 100644 index 48fffdee..00000000 --- a/src/humanloop/types/prompt_call_stream_response.py +++ /dev/null @@ -1,104 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage - - -class PromptCallStreamResponse(UncheckedBaseModel): - """ - Response model for calling Prompt in streaming mode. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. 
- """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - index: int = pydantic.Field() - """ - The index of the sample in the batch. - """ - - id: str = pydantic.Field() - """ - ID of the log. - """ - - prompt_id: str = pydantic.Field() - """ - ID of the Prompt the log belongs to. - """ - - version_id: str = pydantic.Field() - """ - ID of the specific version of the Prompt. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py deleted file mode 100644 index 03e5c624..00000000 --- a/src/humanloop/types/prompt_kernel_request.py +++ /dev/null @@ -1,127 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .model_endpoints import ModelEndpoints -from .model_providers import ModelProviders -from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort -from .prompt_kernel_request_stop import PromptKernelRequestStop -from .prompt_kernel_request_template import PromptKernelRequestTemplate -from .response_format import ResponseFormat -from .template_language import TemplateLanguage -from .tool_function import ToolFunction - - -class PromptKernelRequest(UncheckedBaseModel): - """ - Base class used by both PromptKernelRequest and AgentKernelRequest. - - Contains the consistent Prompt-related fields. - """ - - model: str = pydantic.Field() - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) - """ - The provider model endpoint used. - """ - - template: typing.Optional[PromptKernelRequestTemplate] = pydantic.Field(default=None) - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) - """ - The template language to use for rendering the template. - """ - - provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) - """ - The company providing the underlying model service. - """ - - max_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of tokens to generate. 
Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - """ - - temperature: typing.Optional[float] = pydantic.Field(default=None) - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - """ - - top_p: typing.Optional[float] = pydantic.Field(default=None) - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing.Optional[PromptKernelRequestStop] = pydantic.Field(default=None) - """ - The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - """ - - presence_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing.Optional[int] = pydantic.Field(default=None) - """ - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None) - """ - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) - """ - The tool specification that the model can choose to call if Tool calling is supported. - """ - - linked_tools: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py deleted file mode 100644 index b5fb8879..00000000 --- a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition.
- -import typing - -from .open_ai_reasoning_effort import OpenAiReasoningEffort - -PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/prompt_kernel_request_stop.py b/src/humanloop/types/prompt_kernel_request_stop.py deleted file mode 100644 index a4e55eb9..00000000 --- a/src/humanloop/types/prompt_kernel_request_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PromptKernelRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/prompt_kernel_request_template.py b/src/humanloop/types/prompt_kernel_request_template.py deleted file mode 100644 index 59cf99d3..00000000 --- a/src/humanloop/types/prompt_kernel_request_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .chat_message import ChatMessage - -PromptKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py deleted file mode 100644 index 8bea9781..00000000 --- a/src/humanloop/types/prompt_log_response.py +++ /dev/null @@ -1,225 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage -from .log_status import LogStatus -from .prompt_log_response_tool_choice import PromptLogResponseToolChoice - - -class PromptLogResponse(UncheckedBaseModel): - """ - General request for creating a Log - """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the provider. - """ - - prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the prompt used to generate the output. - """ - - reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of reasoning tokens used to generate the output. - """ - - output_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - Number of tokens in the output generated by the model. - """ - - prompt_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the prompt. - """ - - output_cost: typing.Optional[float] = pydantic.Field(default=None) - """ - Cost in dollars associated to the tokens in the output. - """ - - finish_reason: typing.Optional[str] = pydantic.Field(default=None) - """ - Reason the generation finished. - """ - - messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) - """ - The messages passed to the provider chat endpoint. - """ - - tool_choice: typing.Optional[PromptLogResponseToolChoice] = pydantic.Field(default=None) - """ - Controls how the model uses tools. The following options are supported: - - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - """ - - prompt: "PromptResponse" = pydantic.Field() - """ - Prompt used to generate the Log. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. - """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received from the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log.
If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(PromptLogResponse) diff --git a/src/humanloop/types/prompt_log_response_tool_choice.py b/src/humanloop/types/prompt_log_response_tool_choice.py deleted file mode 100644 index e7acf4bb..00000000 --- a/src/humanloop/types/prompt_log_response_tool_choice.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .tool_choice import ToolChoice - -PromptLogResponseToolChoice = typing.Union[ - typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice -] diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py deleted file mode 100644 index 1a2b1490..00000000 --- a/src/humanloop/types/prompt_response.py +++ /dev/null @@ -1,251 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -import typing_extensions -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.serialization import FieldMetadata -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_response import EnvironmentResponse -from .evaluator_aggregate import EvaluatorAggregate -from .input_response import InputResponse -from .linked_tool_response import LinkedToolResponse -from .model_endpoints import ModelEndpoints -from .model_providers import ModelProviders -from .prompt_response_reasoning_effort import PromptResponseReasoningEffort -from .prompt_response_stop import PromptResponseStop -from .prompt_response_template import PromptResponseTemplate -from .response_format import ResponseFormat -from .template_language import TemplateLanguage -from .tool_function import ToolFunction -from .user_response import UserResponse - - -class PromptResponse(UncheckedBaseModel): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str = pydantic.Field() - """ - Path of the Prompt, including the name, which is used as a unique identifier. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Prompt. - """ - - directory_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the directory that the file is in on Humanloop. - """ - - model: str = pydantic.Field() - """ - The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - """ - - endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) - """ - The provider model endpoint used. - """ - - template: typing.Optional[PromptResponseTemplate] = pydantic.Field(default=None) - """ - The template contains the main structure and instructions for the model, including input variables for dynamic values. - - For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. - For completion models, provide a prompt template as a string. - - Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - """ - - template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) - """ - The template language to use for rendering the template. - """ - - provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) - """ - The company providing the underlying model service. - """ - - max_tokens: typing.Optional[int] = pydantic.Field(default=None) - """ - The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - """ - - temperature: typing.Optional[float] = pydantic.Field(default=None) - """ - What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - """ - - top_p: typing.Optional[float] = pydantic.Field(default=None) - """ - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - """ - - stop: typing.Optional[PromptResponseStop] = pydantic.Field(default=None) - """ - The string (or list of strings) after which the model will stop generating.
The returned text will not contain the stop sequence. - """ - - presence_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - """ - - frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - """ - - other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Other parameter values to be passed to the provider call. - """ - - seed: typing.Optional[int] = pydantic.Field(default=None) - """ - If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - """ - - response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) - """ - The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - """ - - reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None) - """ - Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. - """ - - tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) - """ - The tool specification that the model can choose to call if Tool calling is supported. - """ - - linked_tools: typing.Optional[typing.List[LinkedToolResponse]] = pydantic.Field(default=None) - """ - The tools linked to your prompt that the model can call. - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - """ - - version_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique name for the Prompt version. Version names must be unique for a given Prompt. - """ - - version_description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the version, e.g., the changes made in this version. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Prompt. - """ - - tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - List of tags associated with the file. - """ - - readme: typing.Optional[str] = pydantic.Field(default=None) - """ - Long description of the file. - """ - - name: str = pydantic.Field() - """ - Name of the Prompt. - """ - - schema_: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") - ] = pydantic.Field(default=None) - """ - The JSON schema for the Prompt. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. - """ - - type: typing.Optional[typing.Literal["prompt"]] = None - environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) - """ - The list of environments the Prompt Version is deployed to.
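The template docstrings above describe double-curly input variables. A rough sketch of that substitution for string templates only (the "default" template language; the real renderer also supports Jinja per the TemplateLanguage alias later in this patch, and chat templates are lists of messages rather than strings):

import re

def render_default_template(template: str, inputs: dict) -> str:
    # Replace each {{name}} placeholder with the corresponding input value.
    return re.sub(r"\{\{\s*(\w+)\s*\}\}", lambda m: str(inputs[m.group(1)]), template)

assert render_default_template("What is the weather in {{city}}?", {"city": "Paris"}) == "What is the weather in Paris?"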
- """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who created the Prompt. - """ - - last_used_at: dt.datetime - version_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated for this Prompt Version - """ - - total_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated across all Prompt Versions - """ - - inputs: typing.List[InputResponse] = pydantic.Field() - """ - Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template. - """ - - evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) - """ - Evaluators that have been attached to this Prompt that are used for monitoring logs. - """ - - evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) - """ - Aggregation of Evaluator results for the Prompt Version. - """ - - raw_file_content: typing.Optional[str] = pydantic.Field(default=None) - """ - The raw content of the Prompt. Corresponds to the .prompt file. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(PromptResponse) diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py deleted file mode 100644 index 86e9e7ad..00000000 --- a/src/humanloop/types/prompt_response_reasoning_effort.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .open_ai_reasoning_effort import OpenAiReasoningEffort - -PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/prompt_response_stop.py b/src/humanloop/types/prompt_response_stop.py deleted file mode 100644 index 664140be..00000000 --- a/src/humanloop/types/prompt_response_stop.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PromptResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/prompt_response_template.py b/src/humanloop/types/prompt_response_template.py deleted file mode 100644 index 8a89bc04..00000000 --- a/src/humanloop/types/prompt_response_template.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from .chat_message import ChatMessage - -PromptResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/provider_api_keys.py b/src/humanloop/types/provider_api_keys.py deleted file mode 100644 index 49bf8731..00000000 --- a/src/humanloop/types/provider_api_keys.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -import typing_extensions -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.serialization import FieldMetadata -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ProviderApiKeys(UncheckedBaseModel): - openai: typing.Optional[str] = None - ai_21: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="ai21")] = None - mock: typing.Optional[str] = None - anthropic: typing.Optional[str] = None - deepseek: typing.Optional[str] = None - bedrock: typing.Optional[str] = None - cohere: typing.Optional[str] = None - openai_azure: typing.Optional[str] = None - openai_azure_endpoint: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/response_format.py b/src/humanloop/types/response_format.py deleted file mode 100644 index 287019c4..00000000 --- a/src/humanloop/types/response_format.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .response_format_type import ResponseFormatType - - -class ResponseFormat(UncheckedBaseModel): - """ - Response format of the model. - """ - - type: ResponseFormatType - json_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The JSON schema of the response format if type is json_schema. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/response_format_type.py b/src/humanloop/types/response_format_type.py deleted file mode 100644 index 3ecf0a30..00000000 --- a/src/humanloop/types/response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ResponseFormatType = typing.Union[typing.Literal["json_object", "json_schema"], typing.Any] diff --git a/src/humanloop/types/run_stats_response.py b/src/humanloop/types/run_stats_response.py deleted file mode 100644 index 3e385b26..00000000 --- a/src/humanloop/types/run_stats_response.py +++ /dev/null @@ -1,54 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .evaluation_status import EvaluationStatus -from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItem - - -class RunStatsResponse(UncheckedBaseModel): - """ - Stats for a Run in the Evaluation. 
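The ResponseFormat model above admits two shapes, per ResponseFormatType. An illustrative pair of payloads (the schema itself is hypothetical):

response_format_object = {"type": "json_object"}
response_format_schema = {
    "type": "json_schema",
    "json_schema": {  # only meaningful when type is "json_schema"
        "type": "object",
        "properties": {"answer": {"type": "string"}},
    },
}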
- """ - - run_id: str = pydantic.Field() - """ - Unique identifier for the Run. - """ - - version_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the evaluated Version. - """ - - batch_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the batch of Logs to include in the Evaluation. - """ - - num_logs: int = pydantic.Field() - """ - The total number of existing Logs in this Run. - """ - - evaluator_stats: typing.List[RunStatsResponseEvaluatorStatsItem] = pydantic.Field() - """ - Stats for each Evaluator Version applied to this Run. - """ - - status: EvaluationStatus = pydantic.Field() - """ - The current status of the Run. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/run_stats_response_evaluator_stats_item.py b/src/humanloop/types/run_stats_response_evaluator_stats_item.py deleted file mode 100644 index 697efb12..00000000 --- a/src/humanloop/types/run_stats_response_evaluator_stats_item.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse -from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse -from .select_evaluator_stats_response import SelectEvaluatorStatsResponse -from .text_evaluator_stats_response import TextEvaluatorStatsResponse - -RunStatsResponseEvaluatorStatsItem = typing.Union[ - NumericEvaluatorStatsResponse, - BooleanEvaluatorStatsResponse, - SelectEvaluatorStatsResponse, - TextEvaluatorStatsResponse, -] diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py deleted file mode 100644 index 703bea5f..00000000 --- a/src/humanloop/types/run_version_response.py +++ /dev/null @@ -1,11 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .agent_response import AgentResponse -from .evaluator_response import EvaluatorResponse -from .flow_response import FlowResponse -from .prompt_response import PromptResponse -from .tool_response import ToolResponse - -RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse] diff --git a/src/humanloop/types/select_evaluator_stats_response.py b/src/humanloop/types/select_evaluator_stats_response.py deleted file mode 100644 index 14068fef..00000000 --- a/src/humanloop/types/select_evaluator_stats_response.py +++ /dev/null @@ -1,52 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class SelectEvaluatorStatsResponse(UncheckedBaseModel): - """ - Also used for 'multi_select' Evaluator versions - """ - - evaluator_version_id: str = pydantic.Field() - """ - Unique identifier for the Evaluator Version. - """ - - total_logs: int = pydantic.Field() - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int = pydantic.Field() - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. 
- """ - - num_nulls: int = pydantic.Field() - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. - """ - - num_errors: int = pydantic.Field() - """ - The total number of errored Evaluators for this Evaluator Version. - """ - - num_judgments_per_option: typing.Dict[str, int] = pydantic.Field() - """ - The total number of Evaluator judgments for this Evaluator Version. This is a mapping of the option name to the number of judgments for that option. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/sort_order.py b/src/humanloop/types/sort_order.py deleted file mode 100644 index 6dc54931..00000000 --- a/src/humanloop/types/sort_order.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SortOrder = typing.Union[typing.Literal["asc", "desc"], typing.Any] diff --git a/src/humanloop/types/template_language.py b/src/humanloop/types/template_language.py deleted file mode 100644 index 4b464b79..00000000 --- a/src/humanloop/types/template_language.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -TemplateLanguage = typing.Union[typing.Literal["default", "jinja"], typing.Any] diff --git a/src/humanloop/types/text_chat_content.py b/src/humanloop/types/text_chat_content.py deleted file mode 100644 index 0db5d057..00000000 --- a/src/humanloop/types/text_chat_content.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class TextChatContent(UncheckedBaseModel): - type: typing.Literal["text"] = "text" - text: str = pydantic.Field() - """ - The message's text content. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/text_evaluator_stats_response.py b/src/humanloop/types/text_evaluator_stats_response.py deleted file mode 100644 index bb38f996..00000000 --- a/src/humanloop/types/text_evaluator_stats_response.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class TextEvaluatorStatsResponse(UncheckedBaseModel): - """ - Base attributes for stats for an Evaluator Version-Evaluated Version pair - in the Evaluation. - """ - - evaluator_version_id: str = pydantic.Field() - """ - Unique identifier for the Evaluator Version. - """ - - total_logs: int = pydantic.Field() - """ - The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. - """ - - num_judgments: int = pydantic.Field() - """ - The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. - """ - - num_nulls: int = pydantic.Field() - """ - The total number of null judgments (i.e. abstentions) for this Evaluator Version. 
- """ - - num_errors: int = pydantic.Field() - """ - The total number of errored Evaluators for this Evaluator Version. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/time_unit.py b/src/humanloop/types/time_unit.py deleted file mode 100644 index 57454139..00000000 --- a/src/humanloop/types/time_unit.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -TimeUnit = typing.Union[typing.Literal["day", "week", "month"], typing.Any] diff --git a/src/humanloop/types/tool_call.py b/src/humanloop/types/tool_call.py deleted file mode 100644 index 11c9d7e9..00000000 --- a/src/humanloop/types/tool_call.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_tool_type import ChatToolType -from .function_tool import FunctionTool - - -class ToolCall(UncheckedBaseModel): - """ - A tool call to be made. - """ - - id: str - type: ChatToolType = "function" - function: FunctionTool - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py deleted file mode 100644 index d3b660e1..00000000 --- a/src/humanloop/types/tool_call_response.py +++ /dev/null @@ -1,175 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .log_status import LogStatus - - -class ToolCallResponse(UncheckedBaseModel): - """ - Response model for a Tool call. - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - tool: "ToolResponse" = pydantic.Field() - """ - Tool used to generate the Log. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. - """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. 
- """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - ID of the log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the Trace containing the Tool Call Log. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(ToolCallResponse) diff --git a/src/humanloop/types/tool_choice.py b/src/humanloop/types/tool_choice.py deleted file mode 100644 index fad59550..00000000 --- a/src/humanloop/types/tool_choice.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_tool_type import ChatToolType -from .function_tool_choice import FunctionToolChoice - - -class ToolChoice(UncheckedBaseModel): - """ - Tool choice to force the model to use a tool. - """ - - type: ChatToolType = "function" - function: FunctionToolChoice - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_function.py b/src/humanloop/types/tool_function.py deleted file mode 100644 index e0a29165..00000000 --- a/src/humanloop/types/tool_function.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ToolFunction(UncheckedBaseModel): - name: str = pydantic.Field() - """ - Name for the tool referenced by the model. - """ - - description: str = pydantic.Field() - """ - Description of the tool referenced by the model - """ - - strict: typing.Optional[bool] = pydantic.Field(default=None) - """ - If true, forces the model to output json data in the structure of the parameters schema. 
- """ - - parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_kernel_request.py b/src/humanloop/types/tool_kernel_request.py deleted file mode 100644 index 3e2d4afe..00000000 --- a/src/humanloop/types/tool_kernel_request.py +++ /dev/null @@ -1,39 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .tool_function import ToolFunction - - -class ToolKernelRequest(UncheckedBaseModel): - function: typing.Optional[ToolFunction] = pydantic.Field(default=None) - """ - Callable function specification of the Tool shown to the model for tool calling. - """ - - source_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Code source of the Tool. - """ - - setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py deleted file mode 100644 index 2524eb5b..00000000 --- a/src/humanloop/types/tool_log_response.py +++ /dev/null @@ -1,180 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .chat_message import ChatMessage -from .log_status import LogStatus - - -class ToolLogResponse(UncheckedBaseModel): - """ - General request for creating a Log - """ - - start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event started. - """ - - end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - When the logged event ended. - """ - - output: typing.Optional[str] = pydantic.Field(default=None) - """ - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - """ - - created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - User defined timestamp for when the log was created. - """ - - error: typing.Optional[str] = pydantic.Field(default=None) - """ - Error message if the log is an error. 
- """ - - provider_latency: typing.Optional[float] = pydantic.Field(default=None) - """ - Duration of the logged event in seconds. - """ - - stdout: typing.Optional[str] = pydantic.Field(default=None) - """ - Captured log and debug statements. - """ - - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw request sent to provider. - """ - - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Raw response received the provider. - """ - - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - The inputs passed to the prompt template. - """ - - source: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifies where the model was called from. - """ - - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Any additional metadata to record. - """ - - log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) - """ - Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. - """ - - source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - """ - - trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) - """ - The ID of the parent Log to nest this Log under in a Trace. - """ - - batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations - """ - - user: typing.Optional[str] = pydantic.Field(default=None) - """ - End-user ID related to the Log. - """ - - environment: typing.Optional[str] = pydantic.Field(default=None) - """ - The name of the Environment the Log is associated to. - """ - - save: typing.Optional[bool] = pydantic.Field(default=None) - """ - Whether the request/response payloads will be stored on Humanloop. - """ - - log_id: typing.Optional[str] = pydantic.Field(default=None) - """ - This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Log. - """ - - evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() - """ - List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. - """ - - trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Flow that the Trace belongs to. - """ - - trace_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Identifier for the Trace that the Log belongs to. - """ - - trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) - """ - Logs nested under this Log in the Trace. - """ - - tool: "ToolResponse" = pydantic.Field() - """ - Tool used to generate the Log. 
- """ - - output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) - """ - The message returned by the Tool. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .log_response import LogResponse # noqa: E402, F401, I001 - -update_forward_refs(ToolLogResponse) diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py deleted file mode 100644 index b2bca04b..00000000 --- a/src/humanloop/types/tool_response.py +++ /dev/null @@ -1,165 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import datetime as dt -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_response import EnvironmentResponse -from .evaluator_aggregate import EvaluatorAggregate -from .files_tool_type import FilesToolType -from .input_response import InputResponse -from .tool_function import ToolFunction -from .user_response import UserResponse - - -class ToolResponse(UncheckedBaseModel): - """ - Base type that all File Responses should inherit from. - - Attributes defined here are common to all File Responses and should be overridden - in the inheriting classes with documentation and appropriate Field definitions. - """ - - path: str = pydantic.Field() - """ - Path of the Tool, including the name, which is used as a unique identifier. - """ - - id: str = pydantic.Field() - """ - Unique identifier for the Tool. - """ - - directory_id: typing.Optional[str] = pydantic.Field(default=None) - """ - ID of the directory that the file is in on Humanloop. - """ - - function: typing.Optional[ToolFunction] = pydantic.Field(default=None) - """ - Callable function specification of the Tool shown to the model for tool calling. - """ - - source_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Code source of the Tool. 
- """ - - setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ - """ - - attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) - """ - Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - """ - - tool_type: typing.Optional[FilesToolType] = pydantic.Field(default=None) - """ - Type of Tool. - """ - - version_name: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for this Tool version. Each Tool can only have one version with a given name. - """ - - version_description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Version. - """ - - name: str = pydantic.Field() - """ - Name of the Tool, which is used as a unique identifier. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the Tool. - """ - - readme: typing.Optional[str] = pydantic.Field(default=None) - """ - Long description of the file. - """ - - tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - List of tags associated with the file. - """ - - version_id: str = pydantic.Field() - """ - Unique identifier for the specific Tool Version. If no query params provided, the default deployed Tool Version is returned. - """ - - type: typing.Optional[typing.Literal["tool"]] = None - environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) - """ - The list of environments the Tool Version is deployed to. - """ - - created_at: dt.datetime - updated_at: dt.datetime - created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who created the Tool. - """ - - last_used_at: dt.datetime - version_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated for this Tool Version - """ - - total_logs_count: int = pydantic.Field() - """ - The number of logs that have been generated across all Tool Versions - """ - - inputs: typing.List[InputResponse] = pydantic.Field() - """ - Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Tool template. - """ - - evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) - """ - Evaluators that have been attached to this Tool that are used for monitoring logs. - """ - - signature: typing.Optional[str] = pydantic.Field(default=None) - """ - Signature of the Tool. - """ - - evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) - """ - Aggregation of Evaluator results for the Tool Version. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 - -update_forward_refs(ToolResponse) diff --git a/src/humanloop/types/update_dateset_action.py b/src/humanloop/types/update_dateset_action.py deleted file mode 100644 index d5264274..00000000 --- a/src/humanloop/types/update_dateset_action.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -UpdateDatesetAction = typing.Union[typing.Literal["set", "add", "remove"], typing.Any] diff --git a/src/humanloop/types/update_evaluation_status_request.py b/src/humanloop/types/update_evaluation_status_request.py deleted file mode 100644 index cb507b69..00000000 --- a/src/humanloop/types/update_evaluation_status_request.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -UpdateEvaluationStatusRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/update_version_request.py b/src/humanloop/types/update_version_request.py deleted file mode 100644 index 0587c889..00000000 --- a/src/humanloop/types/update_version_request.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class UpdateVersionRequest(UncheckedBaseModel): - name: typing.Optional[str] = pydantic.Field(default=None) - """ - Name of the version. - """ - - description: typing.Optional[str] = pydantic.Field(default=None) - """ - Description of the version. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/user_response.py b/src/humanloop/types/user_response.py deleted file mode 100644 index 5f41c81d..00000000 --- a/src/humanloop/types/user_response.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -UserResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/valence.py b/src/humanloop/types/valence.py deleted file mode 100644 index 4779efc2..00000000 --- a/src/humanloop/types/valence.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -Valence = typing.Union[typing.Literal["positive", "negative", "neutral"], typing.Any] diff --git a/src/humanloop/types/validation_error.py b/src/humanloop/types/validation_error.py deleted file mode 100644 index 0438bc05..00000000 --- a/src/humanloop/types/validation_error.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .validation_error_loc_item import ValidationErrorLocItem - - -class ValidationError(UncheckedBaseModel): - loc: typing.List[ValidationErrorLocItem] - msg: str - type: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/validation_error_loc_item.py b/src/humanloop/types/validation_error_loc_item.py deleted file mode 100644 index 9a0a83fe..00000000 --- a/src/humanloop/types/validation_error_loc_item.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ValidationErrorLocItem = typing.Union[str, int] diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py deleted file mode 100644 index fdee59de..00000000 --- a/src/humanloop/types/version_deployment_response.py +++ /dev/null @@ -1,50 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel -from .environment_response import EnvironmentResponse - - -class VersionDeploymentResponse(UncheckedBaseModel): - """ - A variable reference to the Version deployed to an Environment - """ - - file: "VersionDeploymentResponseFile" = pydantic.Field() - """ - The File that the deployed Version belongs to. - """ - - environment: EnvironmentResponse = pydantic.Field() - """ - The Environment that the Version is deployed to. 
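-
-    A dispatch sketch over the two reference shapes (editor's example; it
-    assumes EnvironmentResponse exposes a `name` attribute, and that the
-    sibling VersionIdResponse carries a Version with a `version_id`):
-
-        def describe(ref) -> str:
-            if ref.type == "environment":
-                return f"deployed to {ref.environment.name}"
-            return f"pinned to version {ref.version.version_id}"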
- """ - - type: typing.Literal["environment"] = "environment" - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 -from .version_deployment_response_file import VersionDeploymentResponseFile # noqa: E402, F401, I001 - -update_forward_refs(VersionDeploymentResponse) diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py deleted file mode 100644 index 130f2c1c..00000000 --- a/src/humanloop/types/version_deployment_response_file.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -from .dataset_response import DatasetResponse - -if typing.TYPE_CHECKING: - from .agent_response import AgentResponse - from .evaluator_response import EvaluatorResponse - from .flow_response import FlowResponse - from .prompt_response import PromptResponse - from .tool_response import ToolResponse -VersionDeploymentResponseFile = typing.Union[ - "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" -] diff --git a/src/humanloop/types/version_id.py b/src/humanloop/types/version_id.py deleted file mode 100644 index 51de3db1..00000000 --- a/src/humanloop/types/version_id.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class VersionId(UncheckedBaseModel): - version_id: str = pydantic.Field() - """ - Unique identifier for the Version. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py deleted file mode 100644 index 47aa53db..00000000 --- a/src/humanloop/types/version_id_response.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs -from ..core.unchecked_base_model import UncheckedBaseModel - - -class VersionIdResponse(UncheckedBaseModel): - """ - A reference to a specific Version by its ID - """ - - version: "VersionIdResponseVersion" = pydantic.Field() - """ - The specific Version being referenced. 
- """ - - type: typing.Literal["version"] = "version" - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 -from .agent_response import AgentResponse # noqa: E402, F401, I001 -from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 -from .flow_response import FlowResponse # noqa: E402, F401, I001 -from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 -from .prompt_response import PromptResponse # noqa: E402, F401, I001 -from .tool_response import ToolResponse # noqa: E402, F401, I001 -from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 -from .version_id_response_version import VersionIdResponseVersion # noqa: E402, F401, I001 - -update_forward_refs(VersionIdResponse) diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py deleted file mode 100644 index eff8378c..00000000 --- a/src/humanloop/types/version_id_response_version.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -from .dataset_response import DatasetResponse - -if typing.TYPE_CHECKING: - from .agent_response import AgentResponse - from .evaluator_response import EvaluatorResponse - from .flow_response import FlowResponse - from .prompt_response import PromptResponse - from .tool_response import ToolResponse -VersionIdResponseVersion = typing.Union[ - "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" -] diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py deleted file mode 100644 index 7785a8f1..00000000 --- a/src/humanloop/types/version_reference_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -if typing.TYPE_CHECKING: - from .version_deployment_response import VersionDeploymentResponse - from .version_id_response import VersionIdResponse -VersionReferenceResponse = typing.Union["VersionDeploymentResponse", "VersionIdResponse"] diff --git a/src/humanloop/types/version_stats_response.py b/src/humanloop/types/version_stats_response.py deleted file mode 100644 index be1c6286..00000000 --- a/src/humanloop/types/version_stats_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem - - -class VersionStatsResponse(UncheckedBaseModel): - version_id: str = pydantic.Field() - """ - Unique identifier for the evaluated Version. - """ - - batch_id: typing.Optional[str] = pydantic.Field(default=None) - """ - Unique identifier for the batch of Logs to include in the Evaluation. - """ - - num_logs: int = pydantic.Field() - """ - The total number of existing Logs in this Run. 
- """ - - evaluator_version_stats: typing.List[VersionStatsResponseEvaluatorVersionStatsItem] = pydantic.Field() - """ - Stats for each Evaluator Version applied to this Run. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py b/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py deleted file mode 100644 index a7b9fb21..00000000 --- a/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse -from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse -from .select_evaluator_stats_response import SelectEvaluatorStatsResponse -from .text_evaluator_stats_response import TextEvaluatorStatsResponse - -VersionStatsResponseEvaluatorVersionStatsItem = typing.Union[ - NumericEvaluatorStatsResponse, - BooleanEvaluatorStatsResponse, - SelectEvaluatorStatsResponse, - TextEvaluatorStatsResponse, -] diff --git a/src/humanloop/types/version_status.py b/src/humanloop/types/version_status.py deleted file mode 100644 index fd31428e..00000000 --- a/src/humanloop/types/version_status.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -VersionStatus = typing.Union[typing.Literal["uncommitted", "committed", "deleted"], typing.Any] diff --git a/src/humanloop/version.py b/src/humanloop/version.py deleted file mode 100644 index 13ce3bc0..00000000 --- a/src/humanloop/version.py +++ /dev/null @@ -1,3 +0,0 @@ -from importlib import metadata - -__version__ = metadata.version("humanloop") From ba42e3f0289232ded6cda175e9baf4b15fa390d3 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Thu, 15 May 2025 17:30:51 +0100 Subject: [PATCH 2/2] Remove humanloop directory from .gitignore --- .gitignore | 2 - src/humanloop/__init__.py | 872 ++++ src/humanloop/agents/__init__.py | 51 + src/humanloop/agents/client.py | 2946 ++++++++++++ src/humanloop/agents/raw_client.py | 4021 +++++++++++++++++ src/humanloop/agents/requests/__init__.py | 27 + .../requests/agent_log_request_agent.py | 7 + .../requests/agent_log_request_tool_choice.py | 9 + .../agent_request_reasoning_effort.py | 7 + .../agents/requests/agent_request_stop.py | 5 + .../agents/requests/agent_request_template.py | 7 + .../requests/agent_request_tools_item.py | 8 + .../requests/agents_call_request_agent.py | 7 + .../agents_call_request_tool_choice.py | 9 + .../agents_call_stream_request_agent.py | 7 + .../agents_call_stream_request_tool_choice.py | 9 + src/humanloop/agents/types/__init__.py | 27 + .../agents/types/agent_log_request_agent.py | 7 + .../types/agent_log_request_tool_choice.py | 9 + .../types/agent_request_reasoning_effort.py | 7 + .../agents/types/agent_request_stop.py | 5 + .../agents/types/agent_request_template.py | 7 + .../agents/types/agent_request_tools_item.py | 8 + .../agents/types/agents_call_request_agent.py | 7 + .../types/agents_call_request_tool_choice.py | 9 + .../types/agents_call_stream_request_agent.py | 7 + .../agents_call_stream_request_tool_choice.py | 9 + src/humanloop/base_client.py | 170 + src/humanloop/core/__init__.py | 59 + 
src/humanloop/core/api_error.py | 23 + src/humanloop/core/client_wrapper.py | 55 + src/humanloop/core/datetime_utils.py | 28 + src/humanloop/core/file.py | 67 + src/humanloop/core/http_client.py | 497 ++ src/humanloop/core/http_response.py | 55 + src/humanloop/core/jsonable_encoder.py | 100 + src/humanloop/core/pagination.py | 82 + src/humanloop/core/pydantic_utilities.py | 255 ++ src/humanloop/core/query_encoder.py | 58 + src/humanloop/core/remove_none_from_dict.py | 11 + src/humanloop/core/request_options.py | 35 + src/humanloop/core/serialization.py | 276 ++ src/humanloop/core/unchecked_base_model.py | 303 ++ src/humanloop/datasets/__init__.py | 7 + src/humanloop/datasets/client.py | 1330 ++++++ src/humanloop/datasets/raw_client.py | 1924 ++++++++ src/humanloop/datasets/types/__init__.py | 9 + ...versions_get_request_include_datapoints.py | 7 + src/humanloop/directories/__init__.py | 4 + src/humanloop/directories/client.py | 385 ++ src/humanloop/directories/raw_client.py | 596 +++ src/humanloop/environment.py | 7 + src/humanloop/errors/__init__.py | 7 + .../errors/unprocessable_entity_error.py | 11 + src/humanloop/evaluations/__init__.py | 27 + src/humanloop/evaluations/client.py | 1177 +++++ src/humanloop/evaluations/raw_client.py | 1845 ++++++++ .../evaluations/requests/__init__.py | 15 + .../add_evaluators_request_evaluators_item.py | 11 + ...eate_evaluation_request_evaluators_item.py | 11 + .../requests/create_run_request_dataset.py | 9 + .../requests/create_run_request_version.py | 9 + src/humanloop/evaluations/types/__init__.py | 15 + .../add_evaluators_request_evaluators_item.py | 9 + ...eate_evaluation_request_evaluators_item.py | 9 + .../types/create_run_request_dataset.py | 9 + .../types/create_run_request_version.py | 9 + src/humanloop/evaluators/__init__.py | 19 + src/humanloop/evaluators/client.py | 1411 ++++++ src/humanloop/evaluators/raw_client.py | 2014 +++++++++ src/humanloop/evaluators/requests/__init__.py | 13 + .../create_evaluator_log_request_judgment.py | 5 + .../create_evaluator_log_request_spec.py | 12 + .../requests/evaluator_request_spec.py | 12 + src/humanloop/evaluators/types/__init__.py | 9 + .../create_evaluator_log_request_judgment.py | 5 + .../create_evaluator_log_request_spec.py | 12 + .../types/evaluator_request_spec.py | 12 + src/humanloop/files/__init__.py | 8 + src/humanloop/files/client.py | 301 ++ src/humanloop/files/raw_client.py | 382 ++ src/humanloop/files/requests/__init__.py | 7 + ...th_files_retrieve_by_path_post_response.py | 19 + src/humanloop/files/types/__init__.py | 7 + ...th_files_retrieve_by_path_post_response.py | 14 + src/humanloop/flows/__init__.py | 4 + src/humanloop/flows/client.py | 1583 +++++++ src/humanloop/flows/raw_client.py | 2217 +++++++++ src/humanloop/logs/__init__.py | 4 + src/humanloop/logs/client.py | 360 ++ src/humanloop/logs/raw_client.py | 501 ++ src/humanloop/prompts/__init__.py | 51 + src/humanloop/prompts/client.py | 2990 ++++++++++++ src/humanloop/prompts/raw_client.py | 3977 ++++++++++++++++ src/humanloop/prompts/requests/__init__.py | 27 + .../requests/prompt_log_request_prompt.py | 7 + .../prompt_log_request_tool_choice.py | 9 + .../prompt_log_update_request_tool_choice.py | 9 + .../prompt_request_reasoning_effort.py | 7 + .../prompts/requests/prompt_request_stop.py | 5 + .../requests/prompt_request_template.py | 7 + .../requests/prompts_call_request_prompt.py | 7 + .../prompts_call_request_tool_choice.py | 9 + .../prompts_call_stream_request_prompt.py | 7 + ...prompts_call_stream_request_tool_choice.py | 9 + 
src/humanloop/prompts/types/__init__.py | 27 + .../types/prompt_log_request_prompt.py | 7 + .../types/prompt_log_request_tool_choice.py | 9 + .../prompt_log_update_request_tool_choice.py | 9 + .../types/prompt_request_reasoning_effort.py | 7 + .../prompts/types/prompt_request_stop.py | 5 + .../prompts/types/prompt_request_template.py | 7 + .../types/prompts_call_request_prompt.py | 7 + .../types/prompts_call_request_tool_choice.py | 9 + .../prompts_call_stream_request_prompt.py | 7 + ...prompts_call_stream_request_tool_choice.py | 9 + src/humanloop/py.typed | 0 src/humanloop/requests/__init__.py | 339 ++ src/humanloop/requests/agent_call_response.py | 202 + .../agent_call_response_tool_choice.py | 9 + .../requests/agent_call_stream_response.py | 19 + .../agent_call_stream_response_payload.py | 9 + .../requests/agent_config_response.py | 7 + .../requests/agent_continue_call_response.py | 202 + ...gent_continue_call_response_tool_choice.py | 9 + .../agent_continue_call_stream_response.py | 19 + ...t_continue_call_stream_response_payload.py | 9 + src/humanloop/requests/agent_inline_tool.py | 13 + .../requests/agent_kernel_request.py | 112 + .../agent_kernel_request_reasoning_effort.py | 7 + .../requests/agent_kernel_request_stop.py | 5 + .../requests/agent_kernel_request_template.py | 7 + .../agent_kernel_request_tools_item.py | 8 + .../requests/agent_linked_file_request.py | 13 + .../requests/agent_linked_file_response.py | 19 + .../agent_linked_file_response_file.py | 22 + src/humanloop/requests/agent_log_response.py | 201 + .../agent_log_response_tool_choice.py | 9 + .../requests/agent_log_stream_response.py | 87 + src/humanloop/requests/agent_response.py | 242 + .../agent_response_reasoning_effort.py | 7 + src/humanloop/requests/agent_response_stop.py | 5 + .../requests/agent_response_template.py | 7 + .../requests/agent_response_tools_item.py | 11 + .../anthropic_redacted_thinking_content.py | 13 + .../requests/anthropic_thinking_content.py | 18 + .../boolean_evaluator_stats_response.py | 45 + src/humanloop/requests/chat_message.py | 41 + .../requests/chat_message_content.py | 7 + .../requests/chat_message_content_item.py | 8 + .../requests/chat_message_thinking_item.py | 8 + .../requests/code_evaluator_request.py | 48 + .../requests/create_agent_log_response.py | 30 + .../requests/create_datapoint_request.py | 24 + .../create_datapoint_request_target_value.py | 7 + .../requests/create_evaluator_log_response.py | 25 + .../requests/create_flow_log_response.py | 30 + .../requests/create_prompt_log_response.py | 25 + .../requests/create_tool_log_response.py | 25 + .../requests/dashboard_configuration.py | 12 + src/humanloop/requests/datapoint_response.py | 29 + .../datapoint_response_target_value.py | 7 + src/humanloop/requests/dataset_response.py | 102 + src/humanloop/requests/directory_response.py | 46 + ...tory_with_parents_and_children_response.py | 64 + ...arents_and_children_response_files_item.py | 19 + .../requests/environment_response.py | 13 + src/humanloop/requests/evaluatee_request.py | 45 + src/humanloop/requests/evaluatee_response.py | 33 + .../requests/evaluation_evaluator_response.py | 19 + .../requests/evaluation_log_response.py | 29 + src/humanloop/requests/evaluation_response.py | 43 + .../requests/evaluation_run_response.py | 56 + .../requests/evaluation_runs_response.py | 13 + src/humanloop/requests/evaluation_stats.py | 29 + ...aluator_activation_deactivation_request.py | 25 + ...tion_deactivation_request_activate_item.py | 10 + 
...on_deactivation_request_deactivate_item.py | 10 + src/humanloop/requests/evaluator_aggregate.py | 25 + .../requests/evaluator_config_response.py | 7 + src/humanloop/requests/evaluator_file_id.py | 24 + src/humanloop/requests/evaluator_file_path.py | 24 + .../evaluator_judgment_number_limit.py | 20 + .../evaluator_judgment_option_response.py | 16 + .../requests/evaluator_log_response.py | 176 + .../evaluator_log_response_judgment.py | 5 + src/humanloop/requests/evaluator_response.py | 122 + .../requests/evaluator_response_spec.py | 12 + .../requests/evaluator_version_id.py | 19 + .../requests/external_evaluator_request.py | 44 + .../requests/file_environment_response.py | 24 + .../file_environment_response_file.py | 19 + .../file_environment_variable_request.py | 15 + src/humanloop/requests/file_id.py | 19 + src/humanloop/requests/file_path.py | 19 + src/humanloop/requests/file_request.py | 15 + src/humanloop/requests/flow_kernel_request.py | 12 + src/humanloop/requests/flow_log_response.py | 161 + src/humanloop/requests/flow_response.py | 109 + src/humanloop/requests/function_tool.py | 12 + .../requests/function_tool_choice.py | 11 + .../requests/http_validation_error.py | 10 + .../requests/human_evaluator_request.py | 48 + src/humanloop/requests/image_chat_content.py | 14 + src/humanloop/requests/image_url.py | 16 + src/humanloop/requests/input_response.py | 10 + src/humanloop/requests/linked_file_request.py | 9 + .../requests/linked_tool_response.py | 37 + src/humanloop/requests/list_agents.py | 13 + src/humanloop/requests/list_datasets.py | 13 + src/humanloop/requests/list_evaluators.py | 13 + src/humanloop/requests/list_flows.py | 13 + src/humanloop/requests/list_prompts.py | 13 + src/humanloop/requests/list_tools.py | 13 + .../requests/llm_evaluator_request.py | 49 + src/humanloop/requests/log_response.py | 19 + src/humanloop/requests/log_stream_response.py | 8 + ...onitoring_evaluator_environment_request.py | 15 + .../requests/monitoring_evaluator_response.py | 33 + .../monitoring_evaluator_version_request.py | 10 + .../numeric_evaluator_stats_response.py | 42 + src/humanloop/requests/overall_stats.py | 30 + .../requests/paginated_data_agent_response.py | 13 + .../paginated_data_evaluation_log_response.py | 13 + .../paginated_data_evaluator_response.py | 13 + .../requests/paginated_data_flow_response.py | 13 + .../requests/paginated_data_log_response.py | 13 + .../paginated_data_prompt_response.py | 13 + .../requests/paginated_data_tool_response.py | 13 + ...r_response_flow_response_agent_response.py | 19 + ...ow_response_agent_response_records_item.py | 19 + .../requests/paginated_datapoint_response.py | 13 + .../requests/paginated_dataset_response.py | 13 + .../requests/paginated_evaluation_response.py | 13 + .../requests/populate_template_response.py | 229 + ...te_template_response_populated_template.py | 7 + ...late_template_response_reasoning_effort.py | 7 + .../populate_template_response_stop.py | 5 + .../populate_template_response_template.py | 7 + .../requests/prompt_call_log_response.py | 77 + .../requests/prompt_call_response.py | 111 + .../prompt_call_response_tool_choice.py | 9 + .../requests/prompt_call_stream_response.py | 92 + .../requests/prompt_kernel_request.py | 116 + .../prompt_kernel_request_reasoning_effort.py | 7 + .../requests/prompt_kernel_request_stop.py | 5 + .../prompt_kernel_request_template.py | 7 + src/humanloop/requests/prompt_log_response.py | 201 + .../prompt_log_response_tool_choice.py | 9 + src/humanloop/requests/prompt_response.py | 227 + 
.../prompt_response_reasoning_effort.py | 7 + .../requests/prompt_response_stop.py | 5 + .../requests/prompt_response_template.py | 7 + src/humanloop/requests/provider_api_keys.py | 16 + src/humanloop/requests/response_format.py | 18 + src/humanloop/requests/run_stats_response.py | 43 + ...run_stats_response_evaluator_stats_item.py | 15 + .../requests/run_version_response.py | 13 + .../select_evaluator_stats_response.py | 41 + src/humanloop/requests/text_chat_content.py | 13 + .../requests/text_evaluator_stats_response.py | 35 + src/humanloop/requests/tool_call.py | 15 + src/humanloop/requests/tool_call_response.py | 146 + src/humanloop/requests/tool_choice.py | 14 + src/humanloop/requests/tool_function.py | 27 + src/humanloop/requests/tool_kernel_request.py | 28 + src/humanloop/requests/tool_log_response.py | 156 + src/humanloop/requests/tool_response.py | 145 + .../requests/update_version_request.py | 15 + src/humanloop/requests/validation_error.py | 12 + .../requests/validation_error_loc_item.py | 5 + .../requests/version_deployment_response.py | 29 + .../version_deployment_response_file.py | 22 + src/humanloop/requests/version_id.py | 10 + src/humanloop/requests/version_id_response.py | 23 + .../requests/version_id_response_version.py | 22 + .../requests/version_reference_response.py | 10 + .../requests/version_stats_response.py | 28 + ...s_response_evaluator_version_stats_item.py | 15 + src/humanloop/tools/__init__.py | 4 + src/humanloop/tools/client.py | 2101 +++++++++ src/humanloop/tools/raw_client.py | 2917 ++++++++++++ src/humanloop/types/__init__.py | 411 ++ src/humanloop/types/agent_call_response.py | 231 + .../types/agent_call_response_tool_choice.py | 9 + .../types/agent_call_stream_response.py | 51 + .../agent_call_stream_response_payload.py | 9 + src/humanloop/types/agent_config_response.py | 18 + .../types/agent_continue_call_response.py | 231 + ...gent_continue_call_response_tool_choice.py | 9 + .../agent_continue_call_stream_response.py | 51 + ...t_continue_call_stream_response_payload.py | 9 + src/humanloop/types/agent_inline_tool.py | 24 + src/humanloop/types/agent_kernel_request.py | 123 + .../agent_kernel_request_reasoning_effort.py | 7 + .../types/agent_kernel_request_stop.py | 5 + .../types/agent_kernel_request_template.py | 7 + .../types/agent_kernel_request_tools_item.py | 8 + .../types/agent_linked_file_request.py | 24 + .../types/agent_linked_file_response.py | 40 + .../types/agent_linked_file_response_file.py | 17 + src/humanloop/types/agent_log_response.py | 225 + .../types/agent_log_response_tool_choice.py | 9 + .../types/agent_log_stream_response.py | 99 + src/humanloop/types/agent_response.py | 266 ++ .../types/agent_response_reasoning_effort.py | 7 + src/humanloop/types/agent_response_stop.py | 5 + .../types/agent_response_template.py | 7 + .../types/agent_response_tools_item.py | 11 + .../anthropic_redacted_thinking_content.py | 24 + .../types/anthropic_thinking_content.py | 29 + .../types/base_models_user_response.py | 5 + .../types/boolean_evaluator_stats_response.py | 58 + src/humanloop/types/chat_message.py | 52 + src/humanloop/types/chat_message_content.py | 7 + .../types/chat_message_content_item.py | 8 + .../types/chat_message_thinking_item.py | 8 + src/humanloop/types/chat_role.py | 5 + src/humanloop/types/chat_tool_type.py | 5 + src/humanloop/types/code_evaluator_request.py | 59 + src/humanloop/types/config_tool_response.py | 5 + .../types/create_agent_log_response.py | 43 + .../types/create_datapoint_request.py | 35 + 
.../create_datapoint_request_target_value.py | 7 + .../types/create_evaluator_log_response.py | 38 + .../types/create_flow_log_response.py | 43 + .../types/create_prompt_log_response.py | 38 + .../types/create_tool_log_response.py | 38 + .../types/dashboard_configuration.py | 23 + src/humanloop/types/datapoint_response.py | 40 + .../types/datapoint_response_target_value.py | 7 + src/humanloop/types/dataset_response.py | 117 + src/humanloop/types/datasets_request.py | 5 + src/humanloop/types/directory_response.py | 57 + ...tory_with_parents_and_children_response.py | 88 + ...arents_and_children_response_files_item.py | 14 + src/humanloop/types/environment_response.py | 25 + src/humanloop/types/environment_tag.py | 5 + src/humanloop/types/evaluatee_request.py | 58 + src/humanloop/types/evaluatee_response.py | 60 + .../types/evaluation_evaluator_response.py | 45 + .../types/evaluation_log_response.py | 60 + src/humanloop/types/evaluation_response.py | 69 + .../types/evaluation_run_response.py | 83 + .../types/evaluation_runs_response.py | 39 + src/humanloop/types/evaluation_stats.py | 40 + src/humanloop/types/evaluation_status.py | 5 + .../types/evaluations_dataset_request.py | 5 + src/humanloop/types/evaluations_request.py | 5 + ...aluator_activation_deactivation_request.py | 36 + ...tion_deactivation_request_activate_item.py | 10 + ...on_deactivation_request_deactivate_item.py | 10 + src/humanloop/types/evaluator_aggregate.py | 37 + .../types/evaluator_arguments_type.py | 5 + .../types/evaluator_config_response.py | 18 + src/humanloop/types/evaluator_file_id.py | 37 + src/humanloop/types/evaluator_file_path.py | 37 + .../types/evaluator_judgment_number_limit.py | 33 + .../evaluator_judgment_option_response.py | 29 + src/humanloop/types/evaluator_log_response.py | 201 + .../types/evaluator_log_response_judgment.py | 5 + src/humanloop/types/evaluator_response.py | 146 + .../types/evaluator_response_spec.py | 12 + .../types/evaluator_return_type_enum.py | 7 + src/humanloop/types/evaluator_version_id.py | 32 + src/humanloop/types/evaluators_request.py | 5 + src/humanloop/types/event_type.py | 21 + .../types/external_evaluator_request.py | 55 + src/humanloop/types/feedback_type.py | 5 + .../types/file_environment_response.py | 51 + .../types/file_environment_response_file.py | 14 + .../file_environment_variable_request.py | 28 + src/humanloop/types/file_id.py | 32 + src/humanloop/types/file_path.py | 32 + src/humanloop/types/file_request.py | 28 + src/humanloop/types/file_sort_by.py | 5 + src/humanloop/types/file_type.py | 5 + src/humanloop/types/files_tool_type.py | 7 + src/humanloop/types/flow_kernel_request.py | 23 + src/humanloop/types/flow_log_response.py | 185 + src/humanloop/types/flow_response.py | 133 + src/humanloop/types/function_tool.py | 25 + src/humanloop/types/function_tool_choice.py | 24 + src/humanloop/types/http_validation_error.py | 21 + .../types/human_evaluator_request.py | 59 + .../human_evaluator_request_return_type.py | 7 + src/humanloop/types/image_chat_content.py | 25 + src/humanloop/types/image_url.py | 29 + src/humanloop/types/image_url_detail.py | 5 + src/humanloop/types/input_response.py | 23 + src/humanloop/types/linked_file_request.py | 22 + src/humanloop/types/linked_tool_response.py | 48 + src/humanloop/types/list_agents.py | 38 + src/humanloop/types/list_datasets.py | 24 + src/humanloop/types/list_evaluators.py | 38 + src/humanloop/types/list_flows.py | 38 + src/humanloop/types/list_prompts.py | 38 + src/humanloop/types/list_tools.py | 38 + 
src/humanloop/types/llm_evaluator_request.py | 60 + src/humanloop/types/log_response.py | 15 + src/humanloop/types/log_status.py | 5 + src/humanloop/types/log_stream_response.py | 8 + src/humanloop/types/model_endpoints.py | 5 + src/humanloop/types/model_providers.py | 10 + ...onitoring_evaluator_environment_request.py | 28 + .../types/monitoring_evaluator_response.py | 53 + .../types/monitoring_evaluator_state.py | 5 + .../monitoring_evaluator_version_request.py | 23 + .../types/numeric_evaluator_stats_response.py | 53 + src/humanloop/types/observability_status.py | 5 + src/humanloop/types/on_agent_call_enum.py | 5 + .../types/open_ai_reasoning_effort.py | 5 + src/humanloop/types/overall_stats.py | 43 + .../types/paginated_data_agent_response.py | 38 + .../paginated_data_evaluation_log_response.py | 44 + .../paginated_data_evaluator_response.py | 38 + .../types/paginated_data_flow_response.py | 38 + .../types/paginated_data_log_response.py | 44 + .../types/paginated_data_prompt_response.py | 38 + .../types/paginated_data_tool_response.py | 38 + ...r_response_flow_response_agent_response.py | 47 + ...ow_response_agent_response_records_item.py | 14 + .../types/paginated_datapoint_response.py | 24 + .../types/paginated_dataset_response.py | 24 + .../types/paginated_evaluation_response.py | 39 + .../types/paginated_prompt_log_response.py | 5 + .../types/paginated_session_response.py | 5 + src/humanloop/types/platform_access_enum.py | 5 + .../types/populate_template_response.py | 258 ++ ...te_template_response_populated_template.py | 7 + ...late_template_response_reasoning_effort.py | 7 + .../types/populate_template_response_stop.py | 5 + .../populate_template_response_template.py | 7 + .../types/prompt_call_log_response.py | 89 + src/humanloop/types/prompt_call_response.py | 136 + .../types/prompt_call_response_tool_choice.py | 9 + .../types/prompt_call_stream_response.py | 104 + src/humanloop/types/prompt_kernel_request.py | 127 + .../prompt_kernel_request_reasoning_effort.py | 7 + .../types/prompt_kernel_request_stop.py | 5 + .../types/prompt_kernel_request_template.py | 7 + src/humanloop/types/prompt_log_response.py | 225 + .../types/prompt_log_response_tool_choice.py | 9 + src/humanloop/types/prompt_response.py | 251 + .../types/prompt_response_reasoning_effort.py | 7 + src/humanloop/types/prompt_response_stop.py | 5 + .../types/prompt_response_template.py | 7 + src/humanloop/types/provider_api_keys.py | 30 + src/humanloop/types/response_format.py | 29 + src/humanloop/types/response_format_type.py | 5 + src/humanloop/types/run_stats_response.py | 54 + ...run_stats_response_evaluator_stats_item.py | 15 + src/humanloop/types/run_version_response.py | 11 + .../types/select_evaluator_stats_response.py | 52 + src/humanloop/types/sort_order.py | 5 + src/humanloop/types/template_language.py | 5 + src/humanloop/types/text_chat_content.py | 24 + .../types/text_evaluator_stats_response.py | 48 + src/humanloop/types/time_unit.py | 5 + src/humanloop/types/tool_call.py | 28 + src/humanloop/types/tool_call_response.py | 175 + src/humanloop/types/tool_choice.py | 27 + src/humanloop/types/tool_function.py | 38 + src/humanloop/types/tool_kernel_request.py | 39 + src/humanloop/types/tool_log_response.py | 180 + src/humanloop/types/tool_response.py | 165 + src/humanloop/types/update_dateset_action.py | 5 + .../types/update_evaluation_status_request.py | 5 + src/humanloop/types/update_version_request.py | 28 + src/humanloop/types/user_response.py | 5 + src/humanloop/types/valence.py | 5 + 
src/humanloop/types/validation_error.py | 23 + .../types/validation_error_loc_item.py | 5 + .../types/version_deployment_response.py | 50 + .../types/version_deployment_response_file.py | 17 + src/humanloop/types/version_id.py | 23 + src/humanloop/types/version_id_response.py | 44 + .../types/version_id_response_version.py | 17 + .../types/version_reference_response.py | 10 + src/humanloop/types/version_stats_response.py | 39 + ...s_response_evaluator_version_stats_item.py | 15 + src/humanloop/types/version_status.py | 5 + src/humanloop/version.py | 3 + 483 files changed, 53227 insertions(+), 2 deletions(-) create mode 100644 src/humanloop/__init__.py create mode 100644 src/humanloop/agents/__init__.py create mode 100644 src/humanloop/agents/client.py create mode 100644 src/humanloop/agents/raw_client.py create mode 100644 src/humanloop/agents/requests/__init__.py create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py create mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py create mode 100644 src/humanloop/agents/requests/agent_request_stop.py create mode 100644 src/humanloop/agents/requests/agent_request_template.py create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py create mode 100644 src/humanloop/agents/types/__init__.py create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py create mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py create mode 100644 src/humanloop/agents/types/agent_request_stop.py create mode 100644 src/humanloop/agents/types/agent_request_template.py create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py create mode 100644 src/humanloop/base_client.py create mode 100644 src/humanloop/core/__init__.py create mode 100644 src/humanloop/core/api_error.py create mode 100644 src/humanloop/core/client_wrapper.py create mode 100644 src/humanloop/core/datetime_utils.py create mode 100644 src/humanloop/core/file.py create mode 100644 src/humanloop/core/http_client.py create mode 100644 src/humanloop/core/http_response.py create mode 100644 src/humanloop/core/jsonable_encoder.py create mode 100644 src/humanloop/core/pagination.py create mode 100644 src/humanloop/core/pydantic_utilities.py create mode 100644 src/humanloop/core/query_encoder.py create mode 100644 src/humanloop/core/remove_none_from_dict.py create mode 100644 src/humanloop/core/request_options.py create mode 100644 src/humanloop/core/serialization.py create mode 100644 src/humanloop/core/unchecked_base_model.py create mode 100644 src/humanloop/datasets/__init__.py create mode 100644 src/humanloop/datasets/client.py create mode 100644 src/humanloop/datasets/raw_client.py create 
mode 100644 src/humanloop/datasets/types/__init__.py create mode 100644 src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py create mode 100644 src/humanloop/directories/__init__.py create mode 100644 src/humanloop/directories/client.py create mode 100644 src/humanloop/directories/raw_client.py create mode 100644 src/humanloop/environment.py create mode 100644 src/humanloop/errors/__init__.py create mode 100644 src/humanloop/errors/unprocessable_entity_error.py create mode 100644 src/humanloop/evaluations/__init__.py create mode 100644 src/humanloop/evaluations/client.py create mode 100644 src/humanloop/evaluations/raw_client.py create mode 100644 src/humanloop/evaluations/requests/__init__.py create mode 100644 src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py create mode 100644 src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py create mode 100644 src/humanloop/evaluations/requests/create_run_request_dataset.py create mode 100644 src/humanloop/evaluations/requests/create_run_request_version.py create mode 100644 src/humanloop/evaluations/types/__init__.py create mode 100644 src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py create mode 100644 src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py create mode 100644 src/humanloop/evaluations/types/create_run_request_dataset.py create mode 100644 src/humanloop/evaluations/types/create_run_request_version.py create mode 100644 src/humanloop/evaluators/__init__.py create mode 100644 src/humanloop/evaluators/client.py create mode 100644 src/humanloop/evaluators/raw_client.py create mode 100644 src/humanloop/evaluators/requests/__init__.py create mode 100644 src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py create mode 100644 src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py create mode 100644 src/humanloop/evaluators/requests/evaluator_request_spec.py create mode 100644 src/humanloop/evaluators/types/__init__.py create mode 100644 src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py create mode 100644 src/humanloop/evaluators/types/create_evaluator_log_request_spec.py create mode 100644 src/humanloop/evaluators/types/evaluator_request_spec.py create mode 100644 src/humanloop/files/__init__.py create mode 100644 src/humanloop/files/client.py create mode 100644 src/humanloop/files/raw_client.py create mode 100644 src/humanloop/files/requests/__init__.py create mode 100644 src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py create mode 100644 src/humanloop/files/types/__init__.py create mode 100644 src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py create mode 100644 src/humanloop/flows/__init__.py create mode 100644 src/humanloop/flows/client.py create mode 100644 src/humanloop/flows/raw_client.py create mode 100644 src/humanloop/logs/__init__.py create mode 100644 src/humanloop/logs/client.py create mode 100644 src/humanloop/logs/raw_client.py create mode 100644 src/humanloop/prompts/__init__.py create mode 100644 src/humanloop/prompts/client.py create mode 100644 src/humanloop/prompts/raw_client.py create mode 100644 src/humanloop/prompts/requests/__init__.py create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py create mode 100644 src/humanloop/prompts/requests/prompt_log_request_tool_choice.py create mode 100644 
src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py create mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py create mode 100644 src/humanloop/prompts/requests/prompt_request_stop.py create mode 100644 src/humanloop/prompts/requests/prompt_request_template.py create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py create mode 100644 src/humanloop/prompts/requests/prompts_call_request_tool_choice.py create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py create mode 100644 src/humanloop/prompts/types/__init__.py create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py create mode 100644 src/humanloop/prompts/types/prompt_log_request_tool_choice.py create mode 100644 src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py create mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py create mode 100644 src/humanloop/prompts/types/prompt_request_stop.py create mode 100644 src/humanloop/prompts/types/prompt_request_template.py create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py create mode 100644 src/humanloop/prompts/types/prompts_call_request_tool_choice.py create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py create mode 100644 src/humanloop/py.typed create mode 100644 src/humanloop/requests/__init__.py create mode 100644 src/humanloop/requests/agent_call_response.py create mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py create mode 100644 src/humanloop/requests/agent_call_stream_response.py create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py create mode 100644 src/humanloop/requests/agent_config_response.py create mode 100644 src/humanloop/requests/agent_continue_call_response.py create mode 100644 src/humanloop/requests/agent_continue_call_response_tool_choice.py create mode 100644 src/humanloop/requests/agent_continue_call_stream_response.py create mode 100644 src/humanloop/requests/agent_continue_call_stream_response_payload.py create mode 100644 src/humanloop/requests/agent_inline_tool.py create mode 100644 src/humanloop/requests/agent_kernel_request.py create mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py create mode 100644 src/humanloop/requests/agent_kernel_request_template.py create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py create mode 100644 src/humanloop/requests/agent_linked_file_request.py create mode 100644 src/humanloop/requests/agent_linked_file_response.py create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py create mode 100644 src/humanloop/requests/agent_log_response.py create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py create mode 100644 src/humanloop/requests/agent_log_stream_response.py create mode 100644 src/humanloop/requests/agent_response.py create mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py create mode 100644 src/humanloop/requests/agent_response_stop.py create mode 100644 src/humanloop/requests/agent_response_template.py create mode 100644 src/humanloop/requests/agent_response_tools_item.py create mode 100644 
src/humanloop/requests/anthropic_redacted_thinking_content.py create mode 100644 src/humanloop/requests/anthropic_thinking_content.py create mode 100644 src/humanloop/requests/boolean_evaluator_stats_response.py create mode 100644 src/humanloop/requests/chat_message.py create mode 100644 src/humanloop/requests/chat_message_content.py create mode 100644 src/humanloop/requests/chat_message_content_item.py create mode 100644 src/humanloop/requests/chat_message_thinking_item.py create mode 100644 src/humanloop/requests/code_evaluator_request.py create mode 100644 src/humanloop/requests/create_agent_log_response.py create mode 100644 src/humanloop/requests/create_datapoint_request.py create mode 100644 src/humanloop/requests/create_datapoint_request_target_value.py create mode 100644 src/humanloop/requests/create_evaluator_log_response.py create mode 100644 src/humanloop/requests/create_flow_log_response.py create mode 100644 src/humanloop/requests/create_prompt_log_response.py create mode 100644 src/humanloop/requests/create_tool_log_response.py create mode 100644 src/humanloop/requests/dashboard_configuration.py create mode 100644 src/humanloop/requests/datapoint_response.py create mode 100644 src/humanloop/requests/datapoint_response_target_value.py create mode 100644 src/humanloop/requests/dataset_response.py create mode 100644 src/humanloop/requests/directory_response.py create mode 100644 src/humanloop/requests/directory_with_parents_and_children_response.py create mode 100644 src/humanloop/requests/directory_with_parents_and_children_response_files_item.py create mode 100644 src/humanloop/requests/environment_response.py create mode 100644 src/humanloop/requests/evaluatee_request.py create mode 100644 src/humanloop/requests/evaluatee_response.py create mode 100644 src/humanloop/requests/evaluation_evaluator_response.py create mode 100644 src/humanloop/requests/evaluation_log_response.py create mode 100644 src/humanloop/requests/evaluation_response.py create mode 100644 src/humanloop/requests/evaluation_run_response.py create mode 100644 src/humanloop/requests/evaluation_runs_response.py create mode 100644 src/humanloop/requests/evaluation_stats.py create mode 100644 src/humanloop/requests/evaluator_activation_deactivation_request.py create mode 100644 src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py create mode 100644 src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py create mode 100644 src/humanloop/requests/evaluator_aggregate.py create mode 100644 src/humanloop/requests/evaluator_config_response.py create mode 100644 src/humanloop/requests/evaluator_file_id.py create mode 100644 src/humanloop/requests/evaluator_file_path.py create mode 100644 src/humanloop/requests/evaluator_judgment_number_limit.py create mode 100644 src/humanloop/requests/evaluator_judgment_option_response.py create mode 100644 src/humanloop/requests/evaluator_log_response.py create mode 100644 src/humanloop/requests/evaluator_log_response_judgment.py create mode 100644 src/humanloop/requests/evaluator_response.py create mode 100644 src/humanloop/requests/evaluator_response_spec.py create mode 100644 src/humanloop/requests/evaluator_version_id.py create mode 100644 src/humanloop/requests/external_evaluator_request.py create mode 100644 src/humanloop/requests/file_environment_response.py create mode 100644 src/humanloop/requests/file_environment_response_file.py create mode 100644 src/humanloop/requests/file_environment_variable_request.py create mode 
100644 src/humanloop/requests/file_id.py create mode 100644 src/humanloop/requests/file_path.py create mode 100644 src/humanloop/requests/file_request.py create mode 100644 src/humanloop/requests/flow_kernel_request.py create mode 100644 src/humanloop/requests/flow_log_response.py create mode 100644 src/humanloop/requests/flow_response.py create mode 100644 src/humanloop/requests/function_tool.py create mode 100644 src/humanloop/requests/function_tool_choice.py create mode 100644 src/humanloop/requests/http_validation_error.py create mode 100644 src/humanloop/requests/human_evaluator_request.py create mode 100644 src/humanloop/requests/image_chat_content.py create mode 100644 src/humanloop/requests/image_url.py create mode 100644 src/humanloop/requests/input_response.py create mode 100644 src/humanloop/requests/linked_file_request.py create mode 100644 src/humanloop/requests/linked_tool_response.py create mode 100644 src/humanloop/requests/list_agents.py create mode 100644 src/humanloop/requests/list_datasets.py create mode 100644 src/humanloop/requests/list_evaluators.py create mode 100644 src/humanloop/requests/list_flows.py create mode 100644 src/humanloop/requests/list_prompts.py create mode 100644 src/humanloop/requests/list_tools.py create mode 100644 src/humanloop/requests/llm_evaluator_request.py create mode 100644 src/humanloop/requests/log_response.py create mode 100644 src/humanloop/requests/log_stream_response.py create mode 100644 src/humanloop/requests/monitoring_evaluator_environment_request.py create mode 100644 src/humanloop/requests/monitoring_evaluator_response.py create mode 100644 src/humanloop/requests/monitoring_evaluator_version_request.py create mode 100644 src/humanloop/requests/numeric_evaluator_stats_response.py create mode 100644 src/humanloop/requests/overall_stats.py create mode 100644 src/humanloop/requests/paginated_data_agent_response.py create mode 100644 src/humanloop/requests/paginated_data_evaluation_log_response.py create mode 100644 src/humanloop/requests/paginated_data_evaluator_response.py create mode 100644 src/humanloop/requests/paginated_data_flow_response.py create mode 100644 src/humanloop/requests/paginated_data_log_response.py create mode 100644 src/humanloop/requests/paginated_data_prompt_response.py create mode 100644 src/humanloop/requests/paginated_data_tool_response.py create mode 100644 src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py create mode 100644 src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py create mode 100644 src/humanloop/requests/paginated_datapoint_response.py create mode 100644 src/humanloop/requests/paginated_dataset_response.py create mode 100644 src/humanloop/requests/paginated_evaluation_response.py create mode 100644 src/humanloop/requests/populate_template_response.py create mode 100644 src/humanloop/requests/populate_template_response_populated_template.py create mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py create mode 100644 src/humanloop/requests/populate_template_response_stop.py create mode 100644 src/humanloop/requests/populate_template_response_template.py create mode 100644 src/humanloop/requests/prompt_call_log_response.py create mode 100644 src/humanloop/requests/prompt_call_response.py create mode 100644 src/humanloop/requests/prompt_call_response_tool_choice.py create 
mode 100644 src/humanloop/requests/prompt_call_stream_response.py create mode 100644 src/humanloop/requests/prompt_kernel_request.py create mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/requests/prompt_kernel_request_stop.py create mode 100644 src/humanloop/requests/prompt_kernel_request_template.py create mode 100644 src/humanloop/requests/prompt_log_response.py create mode 100644 src/humanloop/requests/prompt_log_response_tool_choice.py create mode 100644 src/humanloop/requests/prompt_response.py create mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py create mode 100644 src/humanloop/requests/prompt_response_stop.py create mode 100644 src/humanloop/requests/prompt_response_template.py create mode 100644 src/humanloop/requests/provider_api_keys.py create mode 100644 src/humanloop/requests/response_format.py create mode 100644 src/humanloop/requests/run_stats_response.py create mode 100644 src/humanloop/requests/run_stats_response_evaluator_stats_item.py create mode 100644 src/humanloop/requests/run_version_response.py create mode 100644 src/humanloop/requests/select_evaluator_stats_response.py create mode 100644 src/humanloop/requests/text_chat_content.py create mode 100644 src/humanloop/requests/text_evaluator_stats_response.py create mode 100644 src/humanloop/requests/tool_call.py create mode 100644 src/humanloop/requests/tool_call_response.py create mode 100644 src/humanloop/requests/tool_choice.py create mode 100644 src/humanloop/requests/tool_function.py create mode 100644 src/humanloop/requests/tool_kernel_request.py create mode 100644 src/humanloop/requests/tool_log_response.py create mode 100644 src/humanloop/requests/tool_response.py create mode 100644 src/humanloop/requests/update_version_request.py create mode 100644 src/humanloop/requests/validation_error.py create mode 100644 src/humanloop/requests/validation_error_loc_item.py create mode 100644 src/humanloop/requests/version_deployment_response.py create mode 100644 src/humanloop/requests/version_deployment_response_file.py create mode 100644 src/humanloop/requests/version_id.py create mode 100644 src/humanloop/requests/version_id_response.py create mode 100644 src/humanloop/requests/version_id_response_version.py create mode 100644 src/humanloop/requests/version_reference_response.py create mode 100644 src/humanloop/requests/version_stats_response.py create mode 100644 src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py create mode 100644 src/humanloop/tools/__init__.py create mode 100644 src/humanloop/tools/client.py create mode 100644 src/humanloop/tools/raw_client.py create mode 100644 src/humanloop/types/__init__.py create mode 100644 src/humanloop/types/agent_call_response.py create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py create mode 100644 src/humanloop/types/agent_call_stream_response.py create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py create mode 100644 src/humanloop/types/agent_config_response.py create mode 100644 src/humanloop/types/agent_continue_call_response.py create mode 100644 src/humanloop/types/agent_continue_call_response_tool_choice.py create mode 100644 src/humanloop/types/agent_continue_call_stream_response.py create mode 100644 src/humanloop/types/agent_continue_call_stream_response_payload.py create mode 100644 src/humanloop/types/agent_inline_tool.py create mode 100644 src/humanloop/types/agent_kernel_request.py create mode 100644 
src/humanloop/types/agent_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/types/agent_kernel_request_stop.py create mode 100644 src/humanloop/types/agent_kernel_request_template.py create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py create mode 100644 src/humanloop/types/agent_linked_file_request.py create mode 100644 src/humanloop/types/agent_linked_file_response.py create mode 100644 src/humanloop/types/agent_linked_file_response_file.py create mode 100644 src/humanloop/types/agent_log_response.py create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py create mode 100644 src/humanloop/types/agent_log_stream_response.py create mode 100644 src/humanloop/types/agent_response.py create mode 100644 src/humanloop/types/agent_response_reasoning_effort.py create mode 100644 src/humanloop/types/agent_response_stop.py create mode 100644 src/humanloop/types/agent_response_template.py create mode 100644 src/humanloop/types/agent_response_tools_item.py create mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py create mode 100644 src/humanloop/types/anthropic_thinking_content.py create mode 100644 src/humanloop/types/base_models_user_response.py create mode 100644 src/humanloop/types/boolean_evaluator_stats_response.py create mode 100644 src/humanloop/types/chat_message.py create mode 100644 src/humanloop/types/chat_message_content.py create mode 100644 src/humanloop/types/chat_message_content_item.py create mode 100644 src/humanloop/types/chat_message_thinking_item.py create mode 100644 src/humanloop/types/chat_role.py create mode 100644 src/humanloop/types/chat_tool_type.py create mode 100644 src/humanloop/types/code_evaluator_request.py create mode 100644 src/humanloop/types/config_tool_response.py create mode 100644 src/humanloop/types/create_agent_log_response.py create mode 100644 src/humanloop/types/create_datapoint_request.py create mode 100644 src/humanloop/types/create_datapoint_request_target_value.py create mode 100644 src/humanloop/types/create_evaluator_log_response.py create mode 100644 src/humanloop/types/create_flow_log_response.py create mode 100644 src/humanloop/types/create_prompt_log_response.py create mode 100644 src/humanloop/types/create_tool_log_response.py create mode 100644 src/humanloop/types/dashboard_configuration.py create mode 100644 src/humanloop/types/datapoint_response.py create mode 100644 src/humanloop/types/datapoint_response_target_value.py create mode 100644 src/humanloop/types/dataset_response.py create mode 100644 src/humanloop/types/datasets_request.py create mode 100644 src/humanloop/types/directory_response.py create mode 100644 src/humanloop/types/directory_with_parents_and_children_response.py create mode 100644 src/humanloop/types/directory_with_parents_and_children_response_files_item.py create mode 100644 src/humanloop/types/environment_response.py create mode 100644 src/humanloop/types/environment_tag.py create mode 100644 src/humanloop/types/evaluatee_request.py create mode 100644 src/humanloop/types/evaluatee_response.py create mode 100644 src/humanloop/types/evaluation_evaluator_response.py create mode 100644 src/humanloop/types/evaluation_log_response.py create mode 100644 src/humanloop/types/evaluation_response.py create mode 100644 src/humanloop/types/evaluation_run_response.py create mode 100644 src/humanloop/types/evaluation_runs_response.py create mode 100644 src/humanloop/types/evaluation_stats.py create mode 100644 src/humanloop/types/evaluation_status.py create 
mode 100644 src/humanloop/types/evaluations_dataset_request.py create mode 100644 src/humanloop/types/evaluations_request.py create mode 100644 src/humanloop/types/evaluator_activation_deactivation_request.py create mode 100644 src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py create mode 100644 src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py create mode 100644 src/humanloop/types/evaluator_aggregate.py create mode 100644 src/humanloop/types/evaluator_arguments_type.py create mode 100644 src/humanloop/types/evaluator_config_response.py create mode 100644 src/humanloop/types/evaluator_file_id.py create mode 100644 src/humanloop/types/evaluator_file_path.py create mode 100644 src/humanloop/types/evaluator_judgment_number_limit.py create mode 100644 src/humanloop/types/evaluator_judgment_option_response.py create mode 100644 src/humanloop/types/evaluator_log_response.py create mode 100644 src/humanloop/types/evaluator_log_response_judgment.py create mode 100644 src/humanloop/types/evaluator_response.py create mode 100644 src/humanloop/types/evaluator_response_spec.py create mode 100644 src/humanloop/types/evaluator_return_type_enum.py create mode 100644 src/humanloop/types/evaluator_version_id.py create mode 100644 src/humanloop/types/evaluators_request.py create mode 100644 src/humanloop/types/event_type.py create mode 100644 src/humanloop/types/external_evaluator_request.py create mode 100644 src/humanloop/types/feedback_type.py create mode 100644 src/humanloop/types/file_environment_response.py create mode 100644 src/humanloop/types/file_environment_response_file.py create mode 100644 src/humanloop/types/file_environment_variable_request.py create mode 100644 src/humanloop/types/file_id.py create mode 100644 src/humanloop/types/file_path.py create mode 100644 src/humanloop/types/file_request.py create mode 100644 src/humanloop/types/file_sort_by.py create mode 100644 src/humanloop/types/file_type.py create mode 100644 src/humanloop/types/files_tool_type.py create mode 100644 src/humanloop/types/flow_kernel_request.py create mode 100644 src/humanloop/types/flow_log_response.py create mode 100644 src/humanloop/types/flow_response.py create mode 100644 src/humanloop/types/function_tool.py create mode 100644 src/humanloop/types/function_tool_choice.py create mode 100644 src/humanloop/types/http_validation_error.py create mode 100644 src/humanloop/types/human_evaluator_request.py create mode 100644 src/humanloop/types/human_evaluator_request_return_type.py create mode 100644 src/humanloop/types/image_chat_content.py create mode 100644 src/humanloop/types/image_url.py create mode 100644 src/humanloop/types/image_url_detail.py create mode 100644 src/humanloop/types/input_response.py create mode 100644 src/humanloop/types/linked_file_request.py create mode 100644 src/humanloop/types/linked_tool_response.py create mode 100644 src/humanloop/types/list_agents.py create mode 100644 src/humanloop/types/list_datasets.py create mode 100644 src/humanloop/types/list_evaluators.py create mode 100644 src/humanloop/types/list_flows.py create mode 100644 src/humanloop/types/list_prompts.py create mode 100644 src/humanloop/types/list_tools.py create mode 100644 src/humanloop/types/llm_evaluator_request.py create mode 100644 src/humanloop/types/log_response.py create mode 100644 src/humanloop/types/log_status.py create mode 100644 src/humanloop/types/log_stream_response.py create mode 100644 src/humanloop/types/model_endpoints.py create mode 100644 
src/humanloop/types/model_providers.py create mode 100644 src/humanloop/types/monitoring_evaluator_environment_request.py create mode 100644 src/humanloop/types/monitoring_evaluator_response.py create mode 100644 src/humanloop/types/monitoring_evaluator_state.py create mode 100644 src/humanloop/types/monitoring_evaluator_version_request.py create mode 100644 src/humanloop/types/numeric_evaluator_stats_response.py create mode 100644 src/humanloop/types/observability_status.py create mode 100644 src/humanloop/types/on_agent_call_enum.py create mode 100644 src/humanloop/types/open_ai_reasoning_effort.py create mode 100644 src/humanloop/types/overall_stats.py create mode 100644 src/humanloop/types/paginated_data_agent_response.py create mode 100644 src/humanloop/types/paginated_data_evaluation_log_response.py create mode 100644 src/humanloop/types/paginated_data_evaluator_response.py create mode 100644 src/humanloop/types/paginated_data_flow_response.py create mode 100644 src/humanloop/types/paginated_data_log_response.py create mode 100644 src/humanloop/types/paginated_data_prompt_response.py create mode 100644 src/humanloop/types/paginated_data_tool_response.py create mode 100644 src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py create mode 100644 src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py create mode 100644 src/humanloop/types/paginated_datapoint_response.py create mode 100644 src/humanloop/types/paginated_dataset_response.py create mode 100644 src/humanloop/types/paginated_evaluation_response.py create mode 100644 src/humanloop/types/paginated_prompt_log_response.py create mode 100644 src/humanloop/types/paginated_session_response.py create mode 100644 src/humanloop/types/platform_access_enum.py create mode 100644 src/humanloop/types/populate_template_response.py create mode 100644 src/humanloop/types/populate_template_response_populated_template.py create mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py create mode 100644 src/humanloop/types/populate_template_response_stop.py create mode 100644 src/humanloop/types/populate_template_response_template.py create mode 100644 src/humanloop/types/prompt_call_log_response.py create mode 100644 src/humanloop/types/prompt_call_response.py create mode 100644 src/humanloop/types/prompt_call_response_tool_choice.py create mode 100644 src/humanloop/types/prompt_call_stream_response.py create mode 100644 src/humanloop/types/prompt_kernel_request.py create mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/types/prompt_kernel_request_stop.py create mode 100644 src/humanloop/types/prompt_kernel_request_template.py create mode 100644 src/humanloop/types/prompt_log_response.py create mode 100644 src/humanloop/types/prompt_log_response_tool_choice.py create mode 100644 src/humanloop/types/prompt_response.py create mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py create mode 100644 src/humanloop/types/prompt_response_stop.py create mode 100644 src/humanloop/types/prompt_response_template.py create mode 100644 src/humanloop/types/provider_api_keys.py create mode 100644 src/humanloop/types/response_format.py create mode 100644 src/humanloop/types/response_format_type.py create mode 100644 src/humanloop/types/run_stats_response.py create mode 100644 
src/humanloop/types/run_stats_response_evaluator_stats_item.py create mode 100644 src/humanloop/types/run_version_response.py create mode 100644 src/humanloop/types/select_evaluator_stats_response.py create mode 100644 src/humanloop/types/sort_order.py create mode 100644 src/humanloop/types/template_language.py create mode 100644 src/humanloop/types/text_chat_content.py create mode 100644 src/humanloop/types/text_evaluator_stats_response.py create mode 100644 src/humanloop/types/time_unit.py create mode 100644 src/humanloop/types/tool_call.py create mode 100644 src/humanloop/types/tool_call_response.py create mode 100644 src/humanloop/types/tool_choice.py create mode 100644 src/humanloop/types/tool_function.py create mode 100644 src/humanloop/types/tool_kernel_request.py create mode 100644 src/humanloop/types/tool_log_response.py create mode 100644 src/humanloop/types/tool_response.py create mode 100644 src/humanloop/types/update_dateset_action.py create mode 100644 src/humanloop/types/update_evaluation_status_request.py create mode 100644 src/humanloop/types/update_version_request.py create mode 100644 src/humanloop/types/user_response.py create mode 100644 src/humanloop/types/valence.py create mode 100644 src/humanloop/types/validation_error.py create mode 100644 src/humanloop/types/validation_error_loc_item.py create mode 100644 src/humanloop/types/version_deployment_response.py create mode 100644 src/humanloop/types/version_deployment_response_file.py create mode 100644 src/humanloop/types/version_id.py create mode 100644 src/humanloop/types/version_id_response.py create mode 100644 src/humanloop/types/version_id_response_version.py create mode 100644 src/humanloop/types/version_reference_response.py create mode 100644 src/humanloop/types/version_stats_response.py create mode 100644 src/humanloop/types/version_stats_response_evaluator_version_stats_item.py create mode 100644 src/humanloop/types/version_status.py create mode 100644 src/humanloop/version.py diff --git a/.gitignore b/.gitignore index f5cda9d9..a55ede77 100644 --- a/.gitignore +++ b/.gitignore @@ -7,5 +7,3 @@ poetry.toml .env tests/assets/*.jsonl tests/assets/*.parquet -# Ignore humanloop directory which could mistakenly be committed when testing sync functionality as it's used as the default sync directory -humanloop diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py new file mode 100644 index 00000000..407e3fb6 --- /dev/null +++ b/src/humanloop/__init__.py @@ -0,0 +1,872 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +# isort: skip_file + +from .types import ( + AgentCallResponse, + AgentCallResponseToolChoice, + AgentCallStreamResponse, + AgentCallStreamResponsePayload, + AgentConfigResponse, + AgentContinueCallResponse, + AgentContinueCallResponseToolChoice, + AgentContinueCallStreamResponse, + AgentContinueCallStreamResponsePayload, + AgentInlineTool, + AgentKernelRequest, + AgentKernelRequestReasoningEffort, + AgentKernelRequestStop, + AgentKernelRequestTemplate, + AgentKernelRequestToolsItem, + AgentLinkedFileRequest, + AgentLinkedFileResponse, + AgentLinkedFileResponseFile, + AgentLogResponse, + AgentLogResponseToolChoice, + AgentLogStreamResponse, + AgentResponse, + AgentResponseReasoningEffort, + AgentResponseStop, + AgentResponseTemplate, + AgentResponseToolsItem, + AnthropicRedactedThinkingContent, + AnthropicThinkingContent, + BaseModelsUserResponse, + BooleanEvaluatorStatsResponse, + ChatMessage, + ChatMessageContent, + ChatMessageContentItem, + ChatMessageThinkingItem, + ChatRole, + ChatToolType, + CodeEvaluatorRequest, + ConfigToolResponse, + CreateAgentLogResponse, + CreateDatapointRequest, + CreateDatapointRequestTargetValue, + CreateEvaluatorLogResponse, + CreateFlowLogResponse, + CreatePromptLogResponse, + CreateToolLogResponse, + DashboardConfiguration, + DatapointResponse, + DatapointResponseTargetValue, + DatasetResponse, + DatasetsRequest, + DirectoryResponse, + DirectoryWithParentsAndChildrenResponse, + DirectoryWithParentsAndChildrenResponseFilesItem, + EnvironmentResponse, + EnvironmentTag, + EvaluateeRequest, + EvaluateeResponse, + EvaluationEvaluatorResponse, + EvaluationLogResponse, + EvaluationResponse, + EvaluationRunResponse, + EvaluationRunsResponse, + EvaluationStats, + EvaluationStatus, + EvaluationsDatasetRequest, + EvaluationsRequest, + EvaluatorActivationDeactivationRequest, + EvaluatorActivationDeactivationRequestActivateItem, + EvaluatorActivationDeactivationRequestDeactivateItem, + EvaluatorAggregate, + EvaluatorArgumentsType, + EvaluatorConfigResponse, + EvaluatorFileId, + EvaluatorFilePath, + EvaluatorJudgmentNumberLimit, + EvaluatorJudgmentOptionResponse, + EvaluatorLogResponse, + EvaluatorLogResponseJudgment, + EvaluatorResponse, + EvaluatorResponseSpec, + EvaluatorReturnTypeEnum, + EvaluatorVersionId, + EvaluatorsRequest, + EventType, + ExternalEvaluatorRequest, + FeedbackType, + FileEnvironmentResponse, + FileEnvironmentResponseFile, + FileEnvironmentVariableRequest, + FileId, + FilePath, + FileRequest, + FileSortBy, + FileType, + FilesToolType, + FlowKernelRequest, + FlowLogResponse, + FlowResponse, + FunctionTool, + FunctionToolChoice, + HttpValidationError, + HumanEvaluatorRequest, + HumanEvaluatorRequestReturnType, + ImageChatContent, + ImageUrl, + ImageUrlDetail, + InputResponse, + LinkedFileRequest, + LinkedToolResponse, + ListAgents, + ListDatasets, + ListEvaluators, + ListFlows, + ListPrompts, + ListTools, + LlmEvaluatorRequest, + LogResponse, + LogStatus, + LogStreamResponse, + ModelEndpoints, + ModelProviders, + MonitoringEvaluatorEnvironmentRequest, + MonitoringEvaluatorResponse, + MonitoringEvaluatorState, + MonitoringEvaluatorVersionRequest, + NumericEvaluatorStatsResponse, + ObservabilityStatus, + OnAgentCallEnum, + OpenAiReasoningEffort, + OverallStats, + PaginatedDataAgentResponse, + PaginatedDataEvaluationLogResponse, + PaginatedDataEvaluatorResponse, + PaginatedDataFlowResponse, + PaginatedDataLogResponse, + PaginatedDataPromptResponse, + PaginatedDataToolResponse, + 
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, + PaginatedDatapointResponse, + PaginatedDatasetResponse, + PaginatedEvaluationResponse, + PaginatedPromptLogResponse, + PaginatedSessionResponse, + PlatformAccessEnum, + PopulateTemplateResponse, + PopulateTemplateResponsePopulatedTemplate, + PopulateTemplateResponseReasoningEffort, + PopulateTemplateResponseStop, + PopulateTemplateResponseTemplate, + PromptCallLogResponse, + PromptCallResponse, + PromptCallResponseToolChoice, + PromptCallStreamResponse, + PromptKernelRequest, + PromptKernelRequestReasoningEffort, + PromptKernelRequestStop, + PromptKernelRequestTemplate, + PromptLogResponse, + PromptLogResponseToolChoice, + PromptResponse, + PromptResponseReasoningEffort, + PromptResponseStop, + PromptResponseTemplate, + ProviderApiKeys, + ResponseFormat, + ResponseFormatType, + RunStatsResponse, + RunStatsResponseEvaluatorStatsItem, + RunVersionResponse, + SelectEvaluatorStatsResponse, + SortOrder, + TemplateLanguage, + TextChatContent, + TextEvaluatorStatsResponse, + TimeUnit, + ToolCall, + ToolCallResponse, + ToolChoice, + ToolFunction, + ToolKernelRequest, + ToolLogResponse, + ToolResponse, + UpdateDatesetAction, + UpdateEvaluationStatusRequest, + UpdateVersionRequest, + UserResponse, + Valence, + ValidationError, + ValidationErrorLocItem, + VersionDeploymentResponse, + VersionDeploymentResponseFile, + VersionId, + VersionIdResponse, + VersionIdResponseVersion, + VersionReferenceResponse, + VersionStatsResponse, + VersionStatsResponseEvaluatorVersionStatsItem, + VersionStatus, +) +from .errors import UnprocessableEntityError +from . 
import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools +from .agents import ( + AgentLogRequestAgent, + AgentLogRequestAgentParams, + AgentLogRequestToolChoice, + AgentLogRequestToolChoiceParams, + AgentRequestReasoningEffort, + AgentRequestReasoningEffortParams, + AgentRequestStop, + AgentRequestStopParams, + AgentRequestTemplate, + AgentRequestTemplateParams, + AgentRequestToolsItem, + AgentRequestToolsItemParams, + AgentsCallRequestAgent, + AgentsCallRequestAgentParams, + AgentsCallRequestToolChoice, + AgentsCallRequestToolChoiceParams, + AgentsCallStreamRequestAgent, + AgentsCallStreamRequestAgentParams, + AgentsCallStreamRequestToolChoice, + AgentsCallStreamRequestToolChoiceParams, +) +from .client import AsyncHumanloop, Humanloop +from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints +from .environment import HumanloopEnvironment +from .evaluations import ( + AddEvaluatorsRequestEvaluatorsItem, + AddEvaluatorsRequestEvaluatorsItemParams, + CreateEvaluationRequestEvaluatorsItem, + CreateEvaluationRequestEvaluatorsItemParams, + CreateRunRequestDataset, + CreateRunRequestDatasetParams, + CreateRunRequestVersion, + CreateRunRequestVersionParams, +) +from .evaluators import ( + CreateEvaluatorLogRequestJudgment, + CreateEvaluatorLogRequestJudgmentParams, + CreateEvaluatorLogRequestSpec, + CreateEvaluatorLogRequestSpecParams, + EvaluatorRequestSpec, + EvaluatorRequestSpecParams, +) +from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams +from .prompts import ( + PromptLogRequestPrompt, + PromptLogRequestPromptParams, + PromptLogRequestToolChoice, + PromptLogRequestToolChoiceParams, + PromptLogUpdateRequestToolChoice, + PromptLogUpdateRequestToolChoiceParams, + PromptRequestReasoningEffort, + PromptRequestReasoningEffortParams, + PromptRequestStop, + PromptRequestStopParams, + PromptRequestTemplate, + PromptRequestTemplateParams, + PromptsCallRequestPrompt, + PromptsCallRequestPromptParams, + PromptsCallRequestToolChoice, + PromptsCallRequestToolChoiceParams, + PromptsCallStreamRequestPrompt, + PromptsCallStreamRequestPromptParams, + PromptsCallStreamRequestToolChoice, + PromptsCallStreamRequestToolChoiceParams, +) +from .requests import ( + AgentCallResponseParams, + AgentCallResponseToolChoiceParams, + AgentCallStreamResponseParams, + AgentCallStreamResponsePayloadParams, + AgentConfigResponseParams, + AgentContinueCallResponseParams, + AgentContinueCallResponseToolChoiceParams, + AgentContinueCallStreamResponseParams, + AgentContinueCallStreamResponsePayloadParams, + AgentInlineToolParams, + AgentKernelRequestParams, + AgentKernelRequestReasoningEffortParams, + AgentKernelRequestStopParams, + AgentKernelRequestTemplateParams, + AgentKernelRequestToolsItemParams, + AgentLinkedFileRequestParams, + AgentLinkedFileResponseFileParams, + AgentLinkedFileResponseParams, + AgentLogResponseParams, + AgentLogResponseToolChoiceParams, + AgentLogStreamResponseParams, + AgentResponseParams, + AgentResponseReasoningEffortParams, + AgentResponseStopParams, + AgentResponseTemplateParams, + AgentResponseToolsItemParams, + AnthropicRedactedThinkingContentParams, + AnthropicThinkingContentParams, + BooleanEvaluatorStatsResponseParams, + ChatMessageContentItemParams, + ChatMessageContentParams, + ChatMessageParams, + ChatMessageThinkingItemParams, + CodeEvaluatorRequestParams, + CreateAgentLogResponseParams, + CreateDatapointRequestParams, + 
CreateDatapointRequestTargetValueParams, + CreateEvaluatorLogResponseParams, + CreateFlowLogResponseParams, + CreatePromptLogResponseParams, + CreateToolLogResponseParams, + DashboardConfigurationParams, + DatapointResponseParams, + DatapointResponseTargetValueParams, + DatasetResponseParams, + DirectoryResponseParams, + DirectoryWithParentsAndChildrenResponseFilesItemParams, + DirectoryWithParentsAndChildrenResponseParams, + EnvironmentResponseParams, + EvaluateeRequestParams, + EvaluateeResponseParams, + EvaluationEvaluatorResponseParams, + EvaluationLogResponseParams, + EvaluationResponseParams, + EvaluationRunResponseParams, + EvaluationRunsResponseParams, + EvaluationStatsParams, + EvaluatorActivationDeactivationRequestActivateItemParams, + EvaluatorActivationDeactivationRequestDeactivateItemParams, + EvaluatorActivationDeactivationRequestParams, + EvaluatorAggregateParams, + EvaluatorConfigResponseParams, + EvaluatorFileIdParams, + EvaluatorFilePathParams, + EvaluatorJudgmentNumberLimitParams, + EvaluatorJudgmentOptionResponseParams, + EvaluatorLogResponseJudgmentParams, + EvaluatorLogResponseParams, + EvaluatorResponseParams, + EvaluatorResponseSpecParams, + EvaluatorVersionIdParams, + ExternalEvaluatorRequestParams, + FileEnvironmentResponseFileParams, + FileEnvironmentResponseParams, + FileEnvironmentVariableRequestParams, + FileIdParams, + FilePathParams, + FileRequestParams, + FlowKernelRequestParams, + FlowLogResponseParams, + FlowResponseParams, + FunctionToolChoiceParams, + FunctionToolParams, + HttpValidationErrorParams, + HumanEvaluatorRequestParams, + ImageChatContentParams, + ImageUrlParams, + InputResponseParams, + LinkedFileRequestParams, + LinkedToolResponseParams, + ListAgentsParams, + ListDatasetsParams, + ListEvaluatorsParams, + ListFlowsParams, + ListPromptsParams, + ListToolsParams, + LlmEvaluatorRequestParams, + LogResponseParams, + LogStreamResponseParams, + MonitoringEvaluatorEnvironmentRequestParams, + MonitoringEvaluatorResponseParams, + MonitoringEvaluatorVersionRequestParams, + NumericEvaluatorStatsResponseParams, + OverallStatsParams, + PaginatedDataAgentResponseParams, + PaginatedDataEvaluationLogResponseParams, + PaginatedDataEvaluatorResponseParams, + PaginatedDataFlowResponseParams, + PaginatedDataLogResponseParams, + PaginatedDataPromptResponseParams, + PaginatedDataToolResponseParams, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, + PaginatedDatapointResponseParams, + PaginatedDatasetResponseParams, + PaginatedEvaluationResponseParams, + PopulateTemplateResponseParams, + PopulateTemplateResponsePopulatedTemplateParams, + PopulateTemplateResponseReasoningEffortParams, + PopulateTemplateResponseStopParams, + PopulateTemplateResponseTemplateParams, + PromptCallLogResponseParams, + PromptCallResponseParams, + PromptCallResponseToolChoiceParams, + PromptCallStreamResponseParams, + PromptKernelRequestParams, + PromptKernelRequestReasoningEffortParams, + PromptKernelRequestStopParams, + PromptKernelRequestTemplateParams, + PromptLogResponseParams, + PromptLogResponseToolChoiceParams, + PromptResponseParams, + PromptResponseReasoningEffortParams, + PromptResponseStopParams, + PromptResponseTemplateParams, + ProviderApiKeysParams, + ResponseFormatParams, + RunStatsResponseEvaluatorStatsItemParams, + RunStatsResponseParams, + RunVersionResponseParams, + 
SelectEvaluatorStatsResponseParams, + TextChatContentParams, + TextEvaluatorStatsResponseParams, + ToolCallParams, + ToolCallResponseParams, + ToolChoiceParams, + ToolFunctionParams, + ToolKernelRequestParams, + ToolLogResponseParams, + ToolResponseParams, + UpdateVersionRequestParams, + ValidationErrorLocItemParams, + ValidationErrorParams, + VersionDeploymentResponseFileParams, + VersionDeploymentResponseParams, + VersionIdParams, + VersionIdResponseParams, + VersionIdResponseVersionParams, + VersionReferenceResponseParams, + VersionStatsResponseEvaluatorVersionStatsItemParams, + VersionStatsResponseParams, +) +from .version import __version__ + +__all__ = [ + "AddEvaluatorsRequestEvaluatorsItem", + "AddEvaluatorsRequestEvaluatorsItemParams", + "AgentCallResponse", + "AgentCallResponseParams", + "AgentCallResponseToolChoice", + "AgentCallResponseToolChoiceParams", + "AgentCallStreamResponse", + "AgentCallStreamResponseParams", + "AgentCallStreamResponsePayload", + "AgentCallStreamResponsePayloadParams", + "AgentConfigResponse", + "AgentConfigResponseParams", + "AgentContinueCallResponse", + "AgentContinueCallResponseParams", + "AgentContinueCallResponseToolChoice", + "AgentContinueCallResponseToolChoiceParams", + "AgentContinueCallStreamResponse", + "AgentContinueCallStreamResponseParams", + "AgentContinueCallStreamResponsePayload", + "AgentContinueCallStreamResponsePayloadParams", + "AgentInlineTool", + "AgentInlineToolParams", + "AgentKernelRequest", + "AgentKernelRequestParams", + "AgentKernelRequestReasoningEffort", + "AgentKernelRequestReasoningEffortParams", + "AgentKernelRequestStop", + "AgentKernelRequestStopParams", + "AgentKernelRequestTemplate", + "AgentKernelRequestTemplateParams", + "AgentKernelRequestToolsItem", + "AgentKernelRequestToolsItemParams", + "AgentLinkedFileRequest", + "AgentLinkedFileRequestParams", + "AgentLinkedFileResponse", + "AgentLinkedFileResponseFile", + "AgentLinkedFileResponseFileParams", + "AgentLinkedFileResponseParams", + "AgentLogRequestAgent", + "AgentLogRequestAgentParams", + "AgentLogRequestToolChoice", + "AgentLogRequestToolChoiceParams", + "AgentLogResponse", + "AgentLogResponseParams", + "AgentLogResponseToolChoice", + "AgentLogResponseToolChoiceParams", + "AgentLogStreamResponse", + "AgentLogStreamResponseParams", + "AgentRequestReasoningEffort", + "AgentRequestReasoningEffortParams", + "AgentRequestStop", + "AgentRequestStopParams", + "AgentRequestTemplate", + "AgentRequestTemplateParams", + "AgentRequestToolsItem", + "AgentRequestToolsItemParams", + "AgentResponse", + "AgentResponseParams", + "AgentResponseReasoningEffort", + "AgentResponseReasoningEffortParams", + "AgentResponseStop", + "AgentResponseStopParams", + "AgentResponseTemplate", + "AgentResponseTemplateParams", + "AgentResponseToolsItem", + "AgentResponseToolsItemParams", + "AgentsCallRequestAgent", + "AgentsCallRequestAgentParams", + "AgentsCallRequestToolChoice", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestAgent", + "AgentsCallStreamRequestAgentParams", + "AgentsCallStreamRequestToolChoice", + "AgentsCallStreamRequestToolChoiceParams", + "AnthropicRedactedThinkingContent", + "AnthropicRedactedThinkingContentParams", + "AnthropicThinkingContent", + "AnthropicThinkingContentParams", + "AsyncHumanloop", + "BaseModelsUserResponse", + "BooleanEvaluatorStatsResponse", + "BooleanEvaluatorStatsResponseParams", + "ChatMessage", + "ChatMessageContent", + "ChatMessageContentItem", + "ChatMessageContentItemParams", + "ChatMessageContentParams", + "ChatMessageParams", 
+ "ChatMessageThinkingItem", + "ChatMessageThinkingItemParams", + "ChatRole", + "ChatToolType", + "CodeEvaluatorRequest", + "CodeEvaluatorRequestParams", + "ConfigToolResponse", + "CreateAgentLogResponse", + "CreateAgentLogResponseParams", + "CreateDatapointRequest", + "CreateDatapointRequestParams", + "CreateDatapointRequestTargetValue", + "CreateDatapointRequestTargetValueParams", + "CreateEvaluationRequestEvaluatorsItem", + "CreateEvaluationRequestEvaluatorsItemParams", + "CreateEvaluatorLogRequestJudgment", + "CreateEvaluatorLogRequestJudgmentParams", + "CreateEvaluatorLogRequestSpec", + "CreateEvaluatorLogRequestSpecParams", + "CreateEvaluatorLogResponse", + "CreateEvaluatorLogResponseParams", + "CreateFlowLogResponse", + "CreateFlowLogResponseParams", + "CreatePromptLogResponse", + "CreatePromptLogResponseParams", + "CreateRunRequestDataset", + "CreateRunRequestDatasetParams", + "CreateRunRequestVersion", + "CreateRunRequestVersionParams", + "CreateToolLogResponse", + "CreateToolLogResponseParams", + "DashboardConfiguration", + "DashboardConfigurationParams", + "DatapointResponse", + "DatapointResponseParams", + "DatapointResponseTargetValue", + "DatapointResponseTargetValueParams", + "DatasetResponse", + "DatasetResponseParams", + "DatasetsRequest", + "DirectoryResponse", + "DirectoryResponseParams", + "DirectoryWithParentsAndChildrenResponse", + "DirectoryWithParentsAndChildrenResponseFilesItem", + "DirectoryWithParentsAndChildrenResponseFilesItemParams", + "DirectoryWithParentsAndChildrenResponseParams", + "EnvironmentResponse", + "EnvironmentResponseParams", + "EnvironmentTag", + "EvaluateeRequest", + "EvaluateeRequestParams", + "EvaluateeResponse", + "EvaluateeResponseParams", + "EvaluationEvaluatorResponse", + "EvaluationEvaluatorResponseParams", + "EvaluationLogResponse", + "EvaluationLogResponseParams", + "EvaluationResponse", + "EvaluationResponseParams", + "EvaluationRunResponse", + "EvaluationRunResponseParams", + "EvaluationRunsResponse", + "EvaluationRunsResponseParams", + "EvaluationStats", + "EvaluationStatsParams", + "EvaluationStatus", + "EvaluationsDatasetRequest", + "EvaluationsRequest", + "EvaluatorActivationDeactivationRequest", + "EvaluatorActivationDeactivationRequestActivateItem", + "EvaluatorActivationDeactivationRequestActivateItemParams", + "EvaluatorActivationDeactivationRequestDeactivateItem", + "EvaluatorActivationDeactivationRequestDeactivateItemParams", + "EvaluatorActivationDeactivationRequestParams", + "EvaluatorAggregate", + "EvaluatorAggregateParams", + "EvaluatorArgumentsType", + "EvaluatorConfigResponse", + "EvaluatorConfigResponseParams", + "EvaluatorFileId", + "EvaluatorFileIdParams", + "EvaluatorFilePath", + "EvaluatorFilePathParams", + "EvaluatorJudgmentNumberLimit", + "EvaluatorJudgmentNumberLimitParams", + "EvaluatorJudgmentOptionResponse", + "EvaluatorJudgmentOptionResponseParams", + "EvaluatorLogResponse", + "EvaluatorLogResponseJudgment", + "EvaluatorLogResponseJudgmentParams", + "EvaluatorLogResponseParams", + "EvaluatorRequestSpec", + "EvaluatorRequestSpecParams", + "EvaluatorResponse", + "EvaluatorResponseParams", + "EvaluatorResponseSpec", + "EvaluatorResponseSpecParams", + "EvaluatorReturnTypeEnum", + "EvaluatorVersionId", + "EvaluatorVersionIdParams", + "EvaluatorsRequest", + "EventType", + "ExternalEvaluatorRequest", + "ExternalEvaluatorRequestParams", + "FeedbackType", + "FileEnvironmentResponse", + "FileEnvironmentResponseFile", + "FileEnvironmentResponseFileParams", + "FileEnvironmentResponseParams", + 
"FileEnvironmentVariableRequest", + "FileEnvironmentVariableRequestParams", + "FileId", + "FileIdParams", + "FilePath", + "FilePathParams", + "FileRequest", + "FileRequestParams", + "FileSortBy", + "FileType", + "FilesToolType", + "FlowKernelRequest", + "FlowKernelRequestParams", + "FlowLogResponse", + "FlowLogResponseParams", + "FlowResponse", + "FlowResponseParams", + "FunctionTool", + "FunctionToolChoice", + "FunctionToolChoiceParams", + "FunctionToolParams", + "HttpValidationError", + "HttpValidationErrorParams", + "HumanEvaluatorRequest", + "HumanEvaluatorRequestParams", + "HumanEvaluatorRequestReturnType", + "Humanloop", + "HumanloopEnvironment", + "ImageChatContent", + "ImageChatContentParams", + "ImageUrl", + "ImageUrlDetail", + "ImageUrlParams", + "InputResponse", + "InputResponseParams", + "LinkedFileRequest", + "LinkedFileRequestParams", + "LinkedToolResponse", + "LinkedToolResponseParams", + "ListAgents", + "ListAgentsParams", + "ListDatasets", + "ListDatasetsParams", + "ListEvaluators", + "ListEvaluatorsParams", + "ListFlows", + "ListFlowsParams", + "ListPrompts", + "ListPromptsParams", + "ListTools", + "ListToolsParams", + "ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints", + "LlmEvaluatorRequest", + "LlmEvaluatorRequestParams", + "LogResponse", + "LogResponseParams", + "LogStatus", + "LogStreamResponse", + "LogStreamResponseParams", + "ModelEndpoints", + "ModelProviders", + "MonitoringEvaluatorEnvironmentRequest", + "MonitoringEvaluatorEnvironmentRequestParams", + "MonitoringEvaluatorResponse", + "MonitoringEvaluatorResponseParams", + "MonitoringEvaluatorState", + "MonitoringEvaluatorVersionRequest", + "MonitoringEvaluatorVersionRequestParams", + "NumericEvaluatorStatsResponse", + "NumericEvaluatorStatsResponseParams", + "ObservabilityStatus", + "OnAgentCallEnum", + "OpenAiReasoningEffort", + "OverallStats", + "OverallStatsParams", + "PaginatedDataAgentResponse", + "PaginatedDataAgentResponseParams", + "PaginatedDataEvaluationLogResponse", + "PaginatedDataEvaluationLogResponseParams", + "PaginatedDataEvaluatorResponse", + "PaginatedDataEvaluatorResponseParams", + "PaginatedDataFlowResponse", + "PaginatedDataFlowResponseParams", + "PaginatedDataLogResponse", + "PaginatedDataLogResponseParams", + "PaginatedDataPromptResponse", + "PaginatedDataPromptResponseParams", + "PaginatedDataToolResponse", + "PaginatedDataToolResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", + "PaginatedDatapointResponse", + "PaginatedDatapointResponseParams", + "PaginatedDatasetResponse", + "PaginatedDatasetResponseParams", + "PaginatedEvaluationResponse", + "PaginatedEvaluationResponseParams", + "PaginatedPromptLogResponse", + "PaginatedSessionResponse", + "PlatformAccessEnum", + "PopulateTemplateResponse", + "PopulateTemplateResponseParams", + "PopulateTemplateResponsePopulatedTemplate", + "PopulateTemplateResponsePopulatedTemplateParams", + "PopulateTemplateResponseReasoningEffort", + "PopulateTemplateResponseReasoningEffortParams", + "PopulateTemplateResponseStop", + "PopulateTemplateResponseStopParams", + "PopulateTemplateResponseTemplate", + 
"PopulateTemplateResponseTemplateParams", + "PromptCallLogResponse", + "PromptCallLogResponseParams", + "PromptCallResponse", + "PromptCallResponseParams", + "PromptCallResponseToolChoice", + "PromptCallResponseToolChoiceParams", + "PromptCallStreamResponse", + "PromptCallStreamResponseParams", + "PromptKernelRequest", + "PromptKernelRequestParams", + "PromptKernelRequestReasoningEffort", + "PromptKernelRequestReasoningEffortParams", + "PromptKernelRequestStop", + "PromptKernelRequestStopParams", + "PromptKernelRequestTemplate", + "PromptKernelRequestTemplateParams", + "PromptLogRequestPrompt", + "PromptLogRequestPromptParams", + "PromptLogRequestToolChoice", + "PromptLogRequestToolChoiceParams", + "PromptLogResponse", + "PromptLogResponseParams", + "PromptLogResponseToolChoice", + "PromptLogResponseToolChoiceParams", + "PromptLogUpdateRequestToolChoice", + "PromptLogUpdateRequestToolChoiceParams", + "PromptRequestReasoningEffort", + "PromptRequestReasoningEffortParams", + "PromptRequestStop", + "PromptRequestStopParams", + "PromptRequestTemplate", + "PromptRequestTemplateParams", + "PromptResponse", + "PromptResponseParams", + "PromptResponseReasoningEffort", + "PromptResponseReasoningEffortParams", + "PromptResponseStop", + "PromptResponseStopParams", + "PromptResponseTemplate", + "PromptResponseTemplateParams", + "PromptsCallRequestPrompt", + "PromptsCallRequestPromptParams", + "PromptsCallRequestToolChoice", + "PromptsCallRequestToolChoiceParams", + "PromptsCallStreamRequestPrompt", + "PromptsCallStreamRequestPromptParams", + "PromptsCallStreamRequestToolChoice", + "PromptsCallStreamRequestToolChoiceParams", + "ProviderApiKeys", + "ProviderApiKeysParams", + "ResponseFormat", + "ResponseFormatParams", + "ResponseFormatType", + "RetrieveByPathFilesRetrieveByPathPostResponse", + "RetrieveByPathFilesRetrieveByPathPostResponseParams", + "RunStatsResponse", + "RunStatsResponseEvaluatorStatsItem", + "RunStatsResponseEvaluatorStatsItemParams", + "RunStatsResponseParams", + "RunVersionResponse", + "RunVersionResponseParams", + "SelectEvaluatorStatsResponse", + "SelectEvaluatorStatsResponseParams", + "SortOrder", + "TemplateLanguage", + "TextChatContent", + "TextChatContentParams", + "TextEvaluatorStatsResponse", + "TextEvaluatorStatsResponseParams", + "TimeUnit", + "ToolCall", + "ToolCallParams", + "ToolCallResponse", + "ToolCallResponseParams", + "ToolChoice", + "ToolChoiceParams", + "ToolFunction", + "ToolFunctionParams", + "ToolKernelRequest", + "ToolKernelRequestParams", + "ToolLogResponse", + "ToolLogResponseParams", + "ToolResponse", + "ToolResponseParams", + "UnprocessableEntityError", + "UpdateDatesetAction", + "UpdateEvaluationStatusRequest", + "UpdateVersionRequest", + "UpdateVersionRequestParams", + "UserResponse", + "Valence", + "ValidationError", + "ValidationErrorLocItem", + "ValidationErrorLocItemParams", + "ValidationErrorParams", + "VersionDeploymentResponse", + "VersionDeploymentResponseFile", + "VersionDeploymentResponseFileParams", + "VersionDeploymentResponseParams", + "VersionId", + "VersionIdParams", + "VersionIdResponse", + "VersionIdResponseParams", + "VersionIdResponseVersion", + "VersionIdResponseVersionParams", + "VersionReferenceResponse", + "VersionReferenceResponseParams", + "VersionStatsResponse", + "VersionStatsResponseEvaluatorVersionStatsItem", + "VersionStatsResponseEvaluatorVersionStatsItemParams", + "VersionStatsResponseParams", + "VersionStatus", + "__version__", + "agents", + "datasets", + "directories", + "evaluations", + "evaluators", + "files", + 
"flows", + "logs", + "prompts", + "tools", +] diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py new file mode 100644 index 00000000..e8a63fd6 --- /dev/null +++ b/src/humanloop/agents/__init__.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .types import ( + AgentLogRequestAgent, + AgentLogRequestToolChoice, + AgentRequestReasoningEffort, + AgentRequestStop, + AgentRequestTemplate, + AgentRequestToolsItem, + AgentsCallRequestAgent, + AgentsCallRequestToolChoice, + AgentsCallStreamRequestAgent, + AgentsCallStreamRequestToolChoice, +) +from .requests import ( + AgentLogRequestAgentParams, + AgentLogRequestToolChoiceParams, + AgentRequestReasoningEffortParams, + AgentRequestStopParams, + AgentRequestTemplateParams, + AgentRequestToolsItemParams, + AgentsCallRequestAgentParams, + AgentsCallRequestToolChoiceParams, + AgentsCallStreamRequestAgentParams, + AgentsCallStreamRequestToolChoiceParams, +) + +__all__ = [ + "AgentLogRequestAgent", + "AgentLogRequestAgentParams", + "AgentLogRequestToolChoice", + "AgentLogRequestToolChoiceParams", + "AgentRequestReasoningEffort", + "AgentRequestReasoningEffortParams", + "AgentRequestStop", + "AgentRequestStopParams", + "AgentRequestTemplate", + "AgentRequestTemplateParams", + "AgentRequestToolsItem", + "AgentRequestToolsItemParams", + "AgentsCallRequestAgent", + "AgentsCallRequestAgentParams", + "AgentsCallRequestToolChoice", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestAgent", + "AgentsCallStreamRequestAgentParams", + "AgentsCallStreamRequestToolChoice", + "AgentsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py new file mode 100644 index 00000000..ab7b887c --- /dev/null +++ b/src/humanloop/agents/client.py @@ -0,0 +1,2946 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+import datetime as dt
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
+from ..core.request_options import RequestOptions
+from ..requests.chat_message import ChatMessageParams
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+    EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+    EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..requests.response_format import ResponseFormatParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..types.agent_log_response import AgentLogResponse
+from ..types.agent_response import AgentResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.list_agents import ListAgents
+from ..types.log_status import LogStatus
+from ..types.model_endpoints import ModelEndpoints
+from ..types.model_providers import ModelProviders
+from ..types.sort_order import SortOrder
+from ..types.template_language import TemplateLanguage
+from .raw_client import AsyncRawAgentsClient, RawAgentsClient
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_stop import AgentRequestStopParams
+from .requests.agent_request_template import AgentRequestTemplateParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._raw_client = RawAgentsClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> RawAgentsClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        RawAgentsClient
+        """
+        return self._raw_client
+
+    def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        run_id: typing.Optional[str] = OMIT,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        prompt_tokens: typing.Optional[int] = OMIT,
+        reasoning_tokens: typing.Optional[int] = OMIT,
+        output_tokens: typing.Optional[int] = OMIT,
+        prompt_cost: typing.Optional[float] = OMIT,
+        output_cost: typing.Optional[float] = OMIT,
+        finish_reason: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agent_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CreateAgentLogResponse:
+        """
+        Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+        in order to trigger Evaluators.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log with.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentLogRequestAgentParams]
+            The Agent configuration to use. Two formats are supported:
+            - An object representing the details of the Agent configuration
+            - A string representing the raw contents of a .agent file
+            A new Agent version will be created if the provided details do not match any existing version.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agent_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CreateAgentLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.log(
+            path='Banking/Teller Agent',
+            agent={
+                'provider': "anthropic",
+                'endpoint': "chat",
+                'model': 'claude-3-7-sonnet-latest',
+                'reasoning_effort': 1024,
+                'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}],
+                'max_iterations': 3,
+                'tools': [
+                    {'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"},
+                    {
+                        'type': 'inline',
+                        'json_schema': {
+                            'name': 'stop',
+                            'description': 'Call this tool when you have finished your task.',
+                            'parameters': {
+                                'type': 'object',
+                                'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}},
+                                'additionalProperties': False,
+                                'required': ['output'],
+                            },
+                            'strict': True,
+                        },
+                        'on_agent_call': "stop",
+                    },
+                ],
+            },
+        )
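+
+        If you are creating the Log in two stages, a minimal sketch of the
+        incomplete/complete lifecycle described above looks like this (the IDs
+        are illustrative):
+
+        client.agents.log(path='Banking/Teller Agent', log_status="incomplete", log_id='log_1234567890', )
+        # ... later, once the Log is finished, mark it complete so monitoring Evaluators run:
+        client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', log_status="complete", )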
+        """
+        _response = self._raw_client.log(
+            version_id=version_id,
+            environment=environment,
+            run_id=run_id,
+            path=path,
+            id=id,
+            output_message=output_message,
+            prompt_tokens=prompt_tokens,
+            reasoning_tokens=reasoning_tokens,
+            output_tokens=output_tokens,
+            prompt_cost=prompt_cost,
+            output_cost=output_cost,
+            finish_reason=finish_reason,
+            messages=messages,
+            tool_choice=tool_choice,
+            agent=agent,
+            start_time=start_time,
+            end_time=end_time,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            agent_log_request_environment=agent_log_request_environment,
+            save=save,
+            log_id=log_id,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def update_log(
+        self,
+        id: str,
+        log_id: str,
+        *,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentLogResponse:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AgentLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course! Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", )
+        """
+        _response = self._raw_client.update_log(
+            id,
+            log_id,
+            messages=messages,
+            output_message=output_message,
+            inputs=inputs,
+            output=output,
+            error=error,
+            log_status=log_status,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def call_stream(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.Iterator[AgentCallStreamResponse]:
+        """
+        Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+        If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+        pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+        The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+        according to its configuration.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. A new version is created if it does not match
+        any existing ones. This is helpful in the case where you are storing or deriving
+        your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+            The Agent configuration to use. Two formats are supported:
+            - An object representing the details of the Agent configuration
+            - A string representing the raw contents of a .agent file
+            A new Agent version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.Iterator[AgentCallStreamResponse]
+
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.agents.call_stream()
+        for chunk in response:
+            print(chunk)
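+
+        A slightly fuller sketch, targeting an Agent by path with a user
+        message (the path and message are illustrative):
+
+        response = client.agents.call_stream(path='Banking/Teller Agent', messages=[{'role': "user", 'content': 'What is my balance?'}], )
+        for chunk in response:
+            print(chunk)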
+        """
+        with self._raw_client.call_stream(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            messages=messages,
+            tool_choice=tool_choice,
+            agent=agent,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            agents_call_stream_request_environment=agents_call_stream_request_environment,
+            save=save,
+            log_id=log_id,
+            provider_api_keys=provider_api_keys,
+            return_inputs=return_inputs,
+            include_trace_children=include_trace_children,
+            request_options=request_options,
+        ) as r:
+            yield from r.data
+
+    def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentCallResponse:
+        """
+        Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+        If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+        pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+        The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+        according to its configuration.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. A new version is created if it does not match
+        any existing ones. This is helpful in the case where you are storing or deriving
+        your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentsCallRequestAgentParams]
+            The Agent configuration to use. Two formats are supported:
+            - An object representing the details of the Agent configuration
+            - A string representing the raw contents of a .agent file
+            A new Agent version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentCallResponse + + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], ) + """ + _response = self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_request_environment=agents_call_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return _response.data + + def continue_call_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[AgentContinueCallStreamResponse]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, by passing the tool call + requested by the Agent. The Agent will resume processing from where it left off. + + The messages in the request will be appended to the original messages in the Log. You do not + have to provide the previous conversation history. + + The original log must be in an incomplete state to be continued. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Yields
+        ------
+        typing.Iterator[AgentContinueCallStreamResponse]
+
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], )
+        for chunk in response:
+            print(chunk)
+        """
+        with self._raw_client.continue_call_stream(
+            log_id=log_id,
+            messages=messages,
+            provider_api_keys=provider_api_keys,
+            include_trace_children=include_trace_children,
+            request_options=request_options,
+        ) as r:
+            yield from r.data
+
+    def continue_call(
+        self,
+        *,
+        log_id: str,
+        messages: typing.Sequence[ChatMessageParams],
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentContinueCallResponse:
+        """
+        Continue an incomplete Agent call.
+
+        This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
+        requested by the Agent. The Agent will resume processing from where it left off.
+
+        The messages in the request will be appended to the original messages in the Log. You do not
+        have to provide the previous conversation history.
+
+        The original log must be in an incomplete state to be continued.
+
+        Parameters
+        ----------
+        log_id : str
+            This identifies the Agent Log to continue.
+
+        messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AgentContinueCallResponse
+
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], )
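+
+        In practice the tool result above comes from executing the tool the
+        incomplete call requested. A sketch, where `run_my_tool` is a
+        hypothetical local helper:
+
+        result = run_my_tool()  # hypothetical: execute the requested tool locally
+        client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': result, 'tool_call_id': 'tc_1234567890'}], )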
+        """
+        _response = self._raw_client.continue_call(
+            log_id=log_id,
+            messages=messages,
+            provider_api_keys=provider_api_keys,
+            include_trace_children=include_trace_children,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[FileSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SyncPager[AgentResponse]:
+        """
+        Get a list of all Agents.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Agents to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Agent name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Agents by
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SyncPager[AgentResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.agents.list(size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+        """
+        return self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    def upsert(
+        self,
+        *,
+        model: str,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        endpoint: typing.Optional[ModelEndpoints] = OMIT,
+        template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+        template_language: typing.Optional[TemplateLanguage] = OMIT,
+        provider: typing.Optional[ModelProviders] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        stop: typing.Optional[AgentRequestStopParams] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        response_format: typing.Optional[ResponseFormatParams] = OMIT,
+        reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+        tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        max_iterations: typing.Optional[int] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        tags: typing.Optional[typing.Sequence[str]] = OMIT,
+        readme: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentResponse:
+        """
+        Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+        tools determine the versions of the Agent.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Agent - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        model : str
+            The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        endpoint : typing.Optional[ModelEndpoints]
+            The provider model endpoint used.
+
+        template : typing.Optional[AgentRequestTemplateParams]
+            The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+            For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+
+            For completion models, provide a prompt template as a string.
+
+            Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+        template_language : typing.Optional[TemplateLanguage]
+            The template language to use for rendering the template.
+
+        provider : typing.Optional[ModelProviders]
+            The company providing the underlying model service.
+
+        max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+        top_p : typing.Optional[float]
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+        stop : typing.Optional[AgentRequestStopParams]
+            The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+        presence_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+        frequency_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+        other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Other parameter values to be passed to the provider call.
+
+        seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+        response_format : typing.Optional[ResponseFormatParams]
+            The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+        reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+        tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AgentResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.upsert(
+            path='Banking/Teller Agent',
+            provider="anthropic",
+            endpoint="chat",
+            model='claude-3-7-sonnet-latest',
+            reasoning_effort=1024,
+            template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}],
+            max_iterations=3,
+            tools=[
+                {
+                    'type': 'inline',
+                    'json_schema': {
+                        'name': 'stop',
+                        'description': 'Call this tool when you have finished your task.',
+                        'parameters': {
+                            'type': 'object',
+                            'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}},
+                            'additionalProperties': False,
+                            'required': ['output'],
+                        },
+                        'strict': True,
+                    },
+                    'on_agent_call': "stop",
+                },
+            ],
+            version_name='teller-agent-v1',
+            version_description='Initial version',
+        )
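+
+        Since a duplicate `version_name` raises a 409 Conflict, you may want to
+        tolerate re-runs. A minimal sketch (the error handling is illustrative,
+        assuming the SDK's `ApiError` carries `status_code`):
+
+        from humanloop.core.api_error import ApiError
+        try:
+            client.agents.upsert(model='gpt-4', path='Banking/Teller Agent', version_name='teller-agent-v1', )
+        except ApiError as e:
+            if e.status_code != 409:
+                raise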
+        """
+        _response = self._raw_client.upsert(
+            model=model,
+            path=path,
+            id=id,
+            endpoint=endpoint,
+            template=template,
+            template_language=template_language,
+            provider=provider,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            stop=stop,
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            other=other,
+            seed=seed,
+            response_format=response_format,
+            reasoning_effort=reasoning_effort,
+            tools=tools,
+            attributes=attributes,
+            max_iterations=max_iterations,
+            version_name=version_name,
+            version_description=version_description,
+            description=description,
+            tags=tags,
+            readme=readme,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def delete_agent_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Delete a version of the Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        version_id : str
+            Unique identifier for the specific version of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', )
+        """
+        _response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+        return _response.data
+
+    def patch_agent_version(
+        self,
+        id: str,
+        version_id: str,
+        *,
+        name: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentResponse:
+        """
+        Update the name or description of the Agent version.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        version_id : str
+            Unique identifier for the specific version of the Agent.
+
+        name : typing.Optional[str]
+            Name of the version.
+
+        description : typing.Optional[str]
+            Description of the version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AgentResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', )
+        """
+        _response = self._raw_client.patch_agent_version(
+            id, version_id, name=name, description=description, request_options=request_options
+        )
+        return _response.data
+
+    def get(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentResponse:
+        """
+        Retrieve the Agent with the given ID.
+
+        By default, the deployed version of the Agent is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AgentResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.get(id='ag_1234567890', )
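+
+        To pin a specific version rather than the deployed one, pass the
+        documented `version_id` query parameter (IDs are illustrative):
+
+        client.agents.get(id='ag_1234567890', version_id='agv_1234567890', )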
+        """
+        _response = self._raw_client.get(
+            id, version_id=version_id, environment=environment, request_options=request_options
+        )
+        return _response.data
+
+    def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+        """
+        Delete the Agent with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.delete(id='ag_1234567890', )
+        """
+        _response = self._raw_client.delete(id, request_options=request_options)
+        return _response.data
+
+    def move(
+        self,
+        id: str,
+        *,
+        path: typing.Optional[str] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        directory_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AgentResponse:
+        """
+        Move the Agent to a different path or change the name.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move Agent to. Starts with `dir_`.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.move(id='ag_1234567890', path='new directory/new name', ) + """ + _response = self._raw_client.move( + id, path=path, name=name, directory_id=directory_id, request_options=request_options + ) + return _response.data + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListAgents: + """ + Get a list of all the versions of a Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgents + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.list_versions(id='ag_1234567890', ) + """ + _response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This Agent + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) + """ + _response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.remove_deployment(id='id', environment_id='environment_id', ) + """ + _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Agent. 
+ + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.list_environments(id='ag_1234567890', ) + """ + _response = self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], ) + """ + _response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Serialize an Agent to the .agent file format. + + Useful for storing the Agent with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + str + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.serialize(id='id', ) + """ + _response = self._raw_client.serialize( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest: + """ + Deserialize an Agent from the .agent file format. 
+ + This returns a subset of the attributes required by an Agent. + This subset is the bit that defines the Agent version (e.g. with `model` and `temperature` etc) + + Parameters + ---------- + agent : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentKernelRequest + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.agents.deserialize(agent='agent', ) + """ + _response = self._raw_client.deserialize(agent=agent, request_options=request_options) + return _response.data + + +class AsyncAgentsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawAgentsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawAgentsClient + """ + return self._raw_client + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentLogResponse: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. 
+ + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + agent : typing.Optional[AgentLogRequestAgentParams] + The Agent configuration to use. Two formats are supported: + - An object representing the details of the Agent configuration + - A string representing the raw contents of a .agent file + A new Agent version will be created if the provided details do not match any existing version. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. 
Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAgentLogResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object' + , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}} + , 'additionalProperties': False + , 'required': ['output'] + }, 'strict': True}, 'on_agent_call': "stop"}]}, ) + asyncio.run(main()) + """ + _response = await self._raw_client.log( + version_id=version_id, + environment=environment, + run_id=run_id, + path=path, + id=id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + agent=agent, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agent_log_request_environment=agent_log_request_environment, + save=save, + log_id=log_id, + request_options=request_options, + ) + return _response.data + + async def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = 
OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentLogResponse: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Agent. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Agent. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Agent Log. + + output : typing.Optional[str] + The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentLogResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course!
Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_log( + id, + log_id, + messages=messages, + output_message=output_message, + inputs=inputs, + output=output, + error=error, + log_status=log_status, + request_options=request_options, + ) + return _response.data + + async def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AgentCallStreamResponse]: + """ + Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. + + If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, + pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. + + The Agent will run for the maximum number of iterations, or until it encounters a stop condition, + according to its configuration. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. A new version is created if it does not match + any existing ones. This is helpful in the case where you are storing or deriving + your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] + Controls how the model uses tools.
The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentsCallStreamRequestAgentParams] + The Agent configuration to use. Two formats are supported: + - An object representing the details of the Agent configuration + - A string representing the raw contents of a .agent file + A new Agent version will be created if the provided details do not match any existing version. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Yields + ------ + typing.AsyncIterator[AgentCallStreamResponse] + + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + response = await client.agents.call_stream() + async for chunk in response: + print(chunk) + asyncio.run(main()) + """ + async with self._raw_client.call_stream( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_stream_request_environment=agents_call_stream_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) as r: + async for data in r.data: + yield data + + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentCallResponse: + """ + Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. + + If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, + pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. + + The Agent will run for the maximum number of iterations, or until it encounters a stop condition, + according to its configuration. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. A new version is created if it does not match + any existing ones. This is helpful in the case where you are storing or deriving + your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name.
This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentsCallRequestAgentParams] + The Agent configuration to use. Two formats are supported: + - An object representing the details of the Agent configuration + - A string representing the raw contents of a .agent file + A new Agent version will be created if the provided details do not match any existing version. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentCallResponse + + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_request_environment=agents_call_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return _response.data + + async def continue_call_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AgentContinueCallStreamResponse]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call by passing the tool call + requested by the Agent. The Agent will resume processing from where it left off. + + The messages in the request will be appended to the original messages in the Log. You do not + have to provide the previous conversation history. + + The original Log must be in an incomplete state to be continued. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Yields + ------ + typing.AsyncIterator[AgentContinueCallStreamResponse] + + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + response = await client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], ) + async for chunk in response: + print(chunk) + asyncio.run(main()) + """ + async with self._raw_client.continue_call_stream( + log_id=log_id, + messages=messages, + provider_api_keys=provider_api_keys, + include_trace_children=include_trace_children, + request_options=request_options, + ) as r: + async for data in r.data: + yield data + + async def continue_call( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentContinueCallResponse: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call by passing the tool call + requested by the Agent. The Agent will resume processing from where it left off. + + The messages in the request will be appended to the original messages in the Log. You do not + have to provide the previous conversation history. + + The original Log must be in an incomplete state to be continued. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentContinueCallResponse + + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.continue_call( + log_id=log_id, + messages=messages, + provider_api_keys=provider_api_keys, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return _response.data + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[AgentResponse]: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name.
+ + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Agents by. + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncPager[AgentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + response = await client.agents.list(size=1, ) + async for item in response: + print(item) + + # alternatively, you can paginate page-by-page + async for page in response.iter_pages(): + print(page) + asyncio.run(main()) + """ + return await self._raw_client.list( + page=page, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + + async def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Create an Agent or update it with a new version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and + tools determine the versions of the Agent. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Agent - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used.
+ + template : typing.Optional[AgentRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[AgentRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + + tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. + + max_iterations : typing.Optional[int] + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + + version_name : typing.Optional[str] + Unique name for the Agent version. Each Agent can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + description : typing.Optional[str] + Description of the Agent. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this Agent. + + readme : typing.Optional[str] + Long description of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.upsert(path='Banking/Teller Agent', provider="anthropic", endpoint="chat", model='claude-3-7-sonnet-latest', reasoning_effort=1024, template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], max_iterations=3, tools=[{'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object' + , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}} + , 'additionalProperties': False + , 'required': ['output'] + }, 'strict': True}, 'on_agent_call': "stop"}], version_name='teller-agent-v1', version_description='Initial version', ) + asyncio.run(main()) + """ + _response = await self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + attributes=attributes, + max_iterations=max_iterations, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, + request_options=request_options, + ) + return _response.data + + async def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options) + return _response.data + + async def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', ) + asyncio.run(main()) + """ + _response = await self._raw_client.patch_agent_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.get(id='ag_1234567890', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Agent with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.delete(id='ag_1234567890', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Move the Agent to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + path : typing.Optional[str] + Path of the Agent including the Agent name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Agent. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.move(id='ag_1234567890', path='new directory/new name', ) + asyncio.run(main()) + """ + _response = await self._raw_client.move( + id, path=path, name=name, directory_id=directory_id, request_options=request_options + ) + return _response.data + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListAgents: + """ + Get a list of all the versions of an Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgents + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.list_versions(id='ag_1234567890', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This Agent + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.remove_deployment(id='id', environment_id='environment_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.list_environments(id='ag_1234567890', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + async def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Serialize an Agent to the .agent file format. + + Useful for storing the Agent with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. 
+ + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + str + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.serialize(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.serialize( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + async def deserialize( + self, *, agent: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentKernelRequest: + """ + Deserialize an Agent from the .agent file format. + + This returns a subset of the attributes required by an Agent. + This subset defines the Agent version (e.g. `model`, `temperature`, etc.). + + Parameters + ---------- + agent : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentKernelRequest + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.agents.deserialize(agent='agent', ) + asyncio.run(main()) + """ + _response = await self._raw_client.deserialize(agent=agent, request_options=request_options) + return _response.data diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py new file mode 100644 index 00000000..e577f8c2 --- /dev/null +++ b/src/humanloop/agents/raw_client.py @@ -0,0 +1,4021 @@ +# This file was auto-generated by Fern from our API Definition.
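+# Editorial note (not part of the generated file): the Raw client variants
+# defined below return HttpResponse wrappers instead of parsed models, so the
+# parsed body is reached via `.data`. A minimal usage sketch, assuming the
+# sync raw client mirrors the async `get` shown above and that `wrapper` is a
+# configured SyncClientWrapper (hypothetical variable name):
+#
+#     raw = RawAgentsClient(client_wrapper=wrapper)
+#     http_response = raw.get("ag_1234567890")
+#     agent = http_response.data  # parsed AgentResponse body
+#
+# The higher-level AgentsClient methods shown earlier simply unwrap `.data`.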
+ +import contextlib +import datetime as dt +import json +import typing +from json.decoder import JSONDecodeError + +import httpx_sse +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager +from ..core.request_options import RequestOptions +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..requests.chat_message import ChatMessageParams +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..requests.response_format import ResponseFormatParams +from ..types.agent_call_response import AgentCallResponse +from ..types.agent_call_stream_response import AgentCallStreamResponse +from ..types.agent_continue_call_response import AgentContinueCallResponse +from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse +from ..types.agent_kernel_request import AgentKernelRequest +from ..types.agent_log_response import AgentLogResponse +from ..types.agent_response import AgentResponse +from ..types.create_agent_log_response import CreateAgentLogResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.http_validation_error import HttpValidationError +from ..types.list_agents import ListAgents +from ..types.log_status import LogStatus +from ..types.model_endpoints import ModelEndpoints +from ..types.model_providers import ModelProviders +from ..types.paginated_data_agent_response import PaginatedDataAgentResponse +from ..types.sort_order import SortOrder +from ..types.template_language import TemplateLanguage +from .requests.agent_log_request_agent import AgentLogRequestAgentParams +from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams +from .requests.agent_request_stop import AgentRequestStopParams +from .requests.agent_request_template import AgentRequestTemplateParams +from .requests.agent_request_tools_item import AgentRequestToolsItemParams +from .requests.agents_call_request_agent import AgentsCallRequestAgentParams +from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams +from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
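+# Editorial note: OMIT above is a sentinel (a cast of Ellipsis) used as the
+# default for optional JSON fields so that "parameter not provided" can be
+# told apart from an explicit None (which, e.g., unsets `output` in
+# update_log). A minimal sketch of the idea, using a hypothetical helper that
+# is not part of this SDK:
+#
+#     def drop_omitted(body: dict) -> dict:
+#         # keep explicit None values, drop keys the caller never supplied
+#         return {k: v for k, v in body.items() if v is not OMIT}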
+ + +class RawAgentsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateAgentLogResponse]: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output.
+ + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentLogRequestAgentParams] + The Agent configuration to use. Two formats are supported: + - An object representing the details of the Agent configuration + - A string representing the raw contents of a .agent file + A new Agent version will be created if the provided details do not match any existing version. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received from the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log.
If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreateAgentLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "agents/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentLogRequestAgentParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agent_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateAgentLogResponse, + construct_type( + type_=CreateAgentLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentLogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Agent.
+ + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Agent. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Agent Log. + + output : typing.Optional[str] + The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentLogResponse, + construct_type( + type_=AgentLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + @contextlib.contextmanager + def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children:
typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]: + """ + Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. + + If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, + pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. + + The agent will run for the maximum number of iterations, or until it encounters a stop condition, + according to its configuration. + + You can use the query parameters `version_id` or `environment` to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. A new version is created if it does not match + any existing ones. This is helpful in the case where you are storing or deriving + your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentsCallStreamRequestAgentParams] + The Agent configuration to use. Two formats are supported: + - An object representing the details of the Agent configuration + - A string representing the raw contents of a .agent file + A new Agent version will be created if the provided details do not match any existing version. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]] + + """ + with self._client_wrapper.httpx_client.stream( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_stream_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "include_trace_children": include_trace_children, + "stream": True, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + + def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]: + try: + if 200 <= _response.status_code < 300: + + def _iter(): + _event_source = httpx_sse.EventSource(_response) + for _sse in _event_source.iter_sse(): + if _sse.data is None: + return + try: + yield typing.cast( + AgentCallStreamResponse, + construct_type( + type_=AgentCallStreamResponse, # type: ignore + object_=json.loads(_sse.data), + ), + ) + except Exception: + pass + return + + return HttpResponse(response=_response,
data=_iter()) + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.text + ) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + yield stream() + + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentCallResponse]: + """ + Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log. + + If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue, + pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint. + + The agent will run for the maximum number of iterations, or until it encounters a stop condition, + according to its configuration. + + You can use the query parameters `version_id` or `environment` to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. A new version is created if it does not match + any existing ones. This is helpful in the case where you are storing or deriving + your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] + Controls how the model uses tools.
The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentsCallRequestAgentParams] + The Agent configuration to use. Two formats are supported: + - An object representing the details of the Agent configuration + - A string representing the raw contents of a .agent file + A new Agent version will be created if the provided details do not match any existing version. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
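+ + Examples + -------- + An illustrative sketch rather than generated reference code: it assumes `raw_agents` is an instance of this raw client, obtained from a configured Humanloop client, and uses a hypothetical Agent path. + + response = raw_agents.call( + path="folder/my-agent", + messages=[{"role": "user", "content": "Hello"}], + ) + print(response.data)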
+ + Returns + ------- + HttpResponse[AgentCallResponse] + + """ + _response = self._client_wrapper.httpx_client.request( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentsCallRequestAgentParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentCallResponse, + construct_type( + type_=AgentCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + @contextlib.contextmanager + def continue_call_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, by passing the tool call + requested by the Agent. The Agent will resume processing from where it left off. + + The messages in the request will be appended to the original messages in the Log. You do not + have to provide the previous conversation history. + + The original log must be in an incomplete state to be continued. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. 
If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]] + + """ + with self._client_wrapper.httpx_client.stream( + "agents/continue", + method="POST", + json={ + "log_id": log_id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "include_trace_children": include_trace_children, + "stream": True, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + + def stream() -> HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]: + try: + if 200 <= _response.status_code < 300: + + def _iter(): + _event_source = httpx_sse.EventSource(_response) + for _sse in _event_source.iter_sse(): + if _sse.data is None: + return + try: + yield typing.cast( + AgentContinueCallStreamResponse, + construct_type( + type_=AgentContinueCallStreamResponse, # type: ignore + object_=json.loads(_sse.data), + ), + ) + except Exception: + pass + return + + return HttpResponse(response=_response, data=_iter()) + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError( + status_code=_response.status_code, headers=dict(_response.headers), body=_response.text + ) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + yield stream() + + def continue_call( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentContinueCallResponse]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, by passing the tool call + requested by the Agent. The Agent will resume processing from where it left off. + + The messages in the request will be appended to the original messages in the Log. You do not + have to provide the previous conversation history. + + The original log must be in an incomplete state to be continued. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentContinueCallResponse] + + """ + _response = self._client_wrapper.httpx_client.request( + "agents/continue", + method="POST", + json={ + "log_id": log_id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentContinueCallResponse, + construct_type( + type_=AgentContinueCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[AgentResponse]: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Agents by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
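+ + Examples + -------- + A minimal sketch, assuming `raw_agents` is an instance of this raw client; iterating the returned `SyncPager` fetches subsequent pages on demand. + + for agent in raw_agents.list(size=10): + print(agent.id, agent.path)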
+ + Returns + ------- + SyncPager[AgentResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + "agents", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataAgentResponse, + construct_type( + type_=PaginatedDataAgentResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Create an Agent or update it with a new version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and + tools determine the versions of the Agent. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Agent - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models) + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[AgentRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[AgentRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + + tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. + + max_iterations : typing.Optional[int] + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + + version_name : typing.Optional[str] + Unique name for the Agent version.
Each Agent can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + description : typing.Optional[str] + Description of the Agent. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this Agent. + + readme : typing.Optional[str] + Long description of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "agents", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=AgentRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=AgentRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": convert_and_respect_annotation_metadata( + object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write" + ), + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" + ), + "attributes": attributes, + "max_iterations": max_iterations, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
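+ + Examples + -------- + Illustrative only; the identifiers are placeholders and `raw_agents` is assumed to be an instance of this raw client. + + raw_agents.delete_agent_version( + id="AGENT_ID", + version_id="VERSION_ID", + )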
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
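+ + Examples + -------- + A sketch assuming `raw_agents` is an instance of this raw client; the ID and Environment name are placeholders. + + response = raw_agents.get( + id="AGENT_ID", + environment="production", + ) + agent = response.data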
+ + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Agent with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Move the Agent to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + path : typing.Optional[str] + Path of the Agent including the Agent name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Agent. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
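+ + Examples + -------- + An illustrative sketch; `raw_agents` is assumed to be an instance of this raw client and the new path is hypothetical. + + raw_agents.move( + id="AGENT_ID", + path="new-folder/new-name", + )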
+ + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + "directory_id": directory_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListAgents]: + """ + Get a list of all the versions of an Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListAgents] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListAgents, + construct_type( + type_=ListAgents, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[AgentResponse]: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This Agent + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
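+ + Examples + -------- + A sketch with placeholder identifiers, assuming `raw_agents` is an instance of this raw client. + + raw_agents.set_deployment( + id="AGENT_ID", + environment_id="ENVIRONMENT_ID", + version_id="VERSION_ID", + )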
+ + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
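+ + Examples + -------- + A sketch assuming `raw_agents` is an instance of this raw client; `response.data` is the list of Environments with their deployed versions. + + response = raw_agents.list_environments(id="AGENT_ID") + for environment in response.data: + print(environment)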
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[str]: + """ + Serialize an Agent to the .agent file format. + + Useful for storing the Agent with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[str] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/serialize", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=_response.text) # type: ignore + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def deserialize( + self, *, agent: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[AgentKernelRequest]: + """ + Deserialize an Agent from the .agent file format. 
+ + This returns a subset of the attributes required by an Agent. + This subset defines the Agent version (e.g. `model`, `temperature`, etc.). + + Parameters + ---------- + agent : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentKernelRequest] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "agents/deserialize", + method="POST", + json={ + "agent": agent, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentKernelRequest, + construct_type( + type_=AgentKernelRequest, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawAgentsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentLogRequestAgentParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateAgentLogResponse]: + """ + Create an Agent Log.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+        in order to trigger Evaluators.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentLogRequestAgentParams]
+            The Agent configuration to use. Two formats are supported:
+            - An object representing the details of the Agent configuration
+            - A string representing the raw contents of a .agent file
+            A new Agent version will be created if the provided details do not match any existing version.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agent_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[CreateAgentLogResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "agents/log",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "run_id": run_id,
+                "path": path,
+                "id": id,
+                "output_message": convert_and_respect_annotation_metadata(
+                    object_=output_message, annotation=ChatMessageParams, direction="write"
+                ),
+                "prompt_tokens": prompt_tokens,
+                "reasoning_tokens": reasoning_tokens,
+                "output_tokens": output_tokens,
+                "prompt_cost": prompt_cost,
+                "output_cost": output_cost,
+                "finish_reason": finish_reason,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "tool_choice": convert_and_respect_annotation_metadata(
+                    object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+                ),
+                "agent": convert_and_respect_annotation_metadata(
+                    object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+                ),
+                "start_time": start_time,
+                "end_time": end_time,
+                "output": output,
+                "created_at": created_at,
+                "error": error,
+                "provider_latency": provider_latency,
+                "stdout": stdout,
+                "provider_request": provider_request,
+                "provider_response": provider_response,
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": agent_log_request_environment,
+                "save": save,
+                "log_id": log_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    CreateAgentLogResponse,
+                    construct_type(
+                        type_=CreateAgentLogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def update_log(
+        self,
+        id: str,
+        log_id: str,
+        *,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentLogResponse]:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
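+
+        Examples
+        --------
+        A minimal sketch, not generated from the API spec: it assumes the usual Fern
+        layout where this raw client is reachable via `client.agents.with_raw_response`
+        on the generated `AsyncHumanloop` client, and the IDs below are placeholders.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Mark a previously-created incomplete Log as complete.
+            response = await client.agents.with_raw_response.update_log(
+                id="ag_1234abcd",
+                log_id="log_1234abcd",
+                log_status="complete",
+            )
+            print(response.data.id)
+
+
+        asyncio.run(main())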
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentLogResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+            method="PATCH",
+            json={
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "output_message": convert_and_respect_annotation_metadata(
+                    object_=output_message, annotation=ChatMessageParams, direction="write"
+                ),
+                "inputs": inputs,
+                "output": output,
+                "error": error,
+                "log_status": log_status,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentLogResponse,
+                    construct_type(
+                        type_=AgentLogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    @contextlib.asynccontextmanager
+    async def call_stream(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+        """
+        Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+        If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+        pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+        The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+        according to its configuration.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. A new version is created if it does not match
+        any existing ones. This is helpful in the case where you are storing or deriving
+        your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+            The Agent configuration to use. Two formats are supported:
+            - An object representing the details of the Agent configuration
+            - A string representing the raw contents of a .agent file
+            A new Agent version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
+        """
+        async with self._client_wrapper.httpx_client.stream(
+            "agents/call",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "path": path,
+                "id": id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "tool_choice": convert_and_respect_annotation_metadata(
+                    object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+                ),
+                "agent": convert_and_respect_annotation_metadata(
+                    object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+                ),
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "start_time": start_time,
+                "end_time": end_time,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": agents_call_stream_request_environment,
+                "save": save,
+                "log_id": log_id,
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "return_inputs": return_inputs,
+                "include_trace_children": include_trace_children,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        async def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            async for _sse in _event_source.aiter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        AgentCallStreamResponse,
+                                        construct_type(
+                                            type_=AgentCallStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return AsyncHttpResponse(response=_response, data=_iter())
+                    await _response.aread()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            headers=dict(_response.headers),
+                            body=typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            ),
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(
+                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+                    )
+                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+            yield await stream()
+
+    async def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentCallResponse]:
+        """
+        Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+        If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+        pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+        The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+        according to its configuration.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. A new version is created if it does not match
+        any existing ones. This is helpful in the case where you are storing or deriving
+        your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentsCallRequestAgentParams]
+            The Agent configuration to use. Two formats are supported:
+            - An object representing the details of the Agent configuration
+            - A string representing the raw contents of a .agent file
+            A new Agent version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
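+
+        Examples
+        --------
+        An illustrative sketch, not generated from the API spec: it assumes the
+        `with_raw_response` accessor on the generated `AsyncHumanloop` client;
+        the path and message are placeholders.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Run the deployed version of the Agent at this path to completion.
+            response = await client.agents.with_raw_response.call(
+                path="my-project/my-agent",
+                messages=[{"role": "user", "content": "What is the weather today?"}],
+            )
+            print(response.data)
+
+
+        asyncio.run(main())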
+ + Returns + ------- + AsyncHttpResponse[AgentCallResponse] + + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentsCallRequestAgentParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentCallResponse, + construct_type( + type_=AgentCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + @contextlib.asynccontextmanager + async def continue_call_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, by passing the tool call + requested by the Agent. The Agent will resume processing from where it left off. + + The messages in the request will be appended to the original messages in the Log. You do not + have to provide the previous conversation history. + + The original log must be in an incomplete state to be continued. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. 
If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]
+
+        """
+        async with self._client_wrapper.httpx_client.stream(
+            "agents/continue",
+            method="POST",
+            json={
+                "log_id": log_id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "include_trace_children": include_trace_children,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        async def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            async for _sse in _event_source.aiter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        AgentContinueCallStreamResponse,
+                                        construct_type(
+                                            type_=AgentContinueCallStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return AsyncHttpResponse(response=_response, data=_iter())
+                    await _response.aread()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            headers=dict(_response.headers),
+                            body=typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            ),
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(
+                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+                    )
+                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+            yield await stream()
+
+    async def continue_call(
+        self,
+        *,
+        log_id: str,
+        messages: typing.Sequence[ChatMessageParams],
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentContinueCallResponse]:
+        """
+        Continue an incomplete Agent call.
+
+        This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
+        requested by the Agent. The Agent will resume processing from where it left off.
+
+        The messages in the request will be appended to the original messages in the Log. You do not
+        have to provide the previous conversation history.
+
+        The original log must be in an incomplete state to be continued.
+
+        Parameters
+        ----------
+        log_id : str
+            This identifies the Agent Log to continue.
+
+        messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop.
If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[AgentContinueCallResponse] + + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/continue", + method="POST", + json={ + "log_id": log_id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentContinueCallResponse, + construct_type( + type_=AgentContinueCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[AgentResponse]: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Agents by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
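+
+        Examples
+        --------
+        An illustrative sketch; assumes the `with_raw_response` accessor on the
+        generated `AsyncHumanloop` client and that the returned `AsyncPager`
+        supports `async for`, as in the pagination core.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Page through all Agents, ten at a time.
+            pager = await client.agents.with_raw_response.list(size=10)
+            async for agent in pager:
+                print(agent.path)
+
+
+        asyncio.run(main())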
+ + Returns + ------- + AsyncPager[AgentResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + "agents", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataAgentResponse, + construct_type( + type_=PaginatedDataAgentResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Create an Agent or update it with a new version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and + tools determine the versions of the Agent. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Agent - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        endpoint : typing.Optional[ModelEndpoints]
+            The provider model endpoint used.
+
+        template : typing.Optional[AgentRequestTemplateParams]
+            The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+            For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+            For completion models, provide a prompt template as a string.
+
+            Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+        template_language : typing.Optional[TemplateLanguage]
+            The template language to use for rendering the template.
+
+        provider : typing.Optional[ModelProviders]
+            The company providing the underlying model service.
+
+        max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+        top_p : typing.Optional[float]
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+        stop : typing.Optional[AgentRequestStopParams]
+            The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+        presence_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+        frequency_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+        other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Other parameter values to be passed to the provider call.
+
+        seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+        response_format : typing.Optional[ResponseFormatParams]
+            The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+        reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+        tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "agents",
+            method="POST",
+            json={
+                "path": path,
+                "id": id,
+                "model": model,
+                "endpoint": endpoint,
+                "template": convert_and_respect_annotation_metadata(
+                    object_=template, annotation=AgentRequestTemplateParams, direction="write"
+                ),
+                "template_language": template_language,
+                "provider": provider,
+                "max_tokens": max_tokens,
+                "temperature": temperature,
+                "top_p": top_p,
+                "stop": convert_and_respect_annotation_metadata(
+                    object_=stop, annotation=AgentRequestStopParams, direction="write"
+                ),
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "other": other,
+                "seed": seed,
+                "response_format": convert_and_respect_annotation_metadata(
+                    object_=response_format, annotation=ResponseFormatParams, direction="write"
+                ),
+                "reasoning_effort": convert_and_respect_annotation_metadata(
+                    object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+                ),
+                "tools": convert_and_respect_annotation_metadata(
+                    object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+                ),
+                "attributes": attributes,
+                "max_iterations": max_iterations,
+                "version_name": version_name,
+                "version_description": version_description,
+                "description": description,
+                "tags": tags,
+                "readme": readme,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def delete_agent_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[None]:
+        """
+        Delete a version of the Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        version_id : str
+            Unique identifier for the specific version of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
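+
+        Examples
+        --------
+        An illustrative sketch; assumes the `with_raw_response` accessor on the
+        generated `AsyncHumanloop` client, with placeholder IDs.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Remove a single version without deleting the Agent itself.
+            await client.agents.with_raw_response.delete_agent_version(
+                id="ag_1234abcd",
+                version_id="agv_1234abcd",
+            )
+
+
+        asyncio.run(main())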
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[AgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
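+
+        Examples
+        --------
+        An illustrative sketch; assumes the `with_raw_response` accessor on the
+        generated `AsyncHumanloop` client, with a placeholder Agent ID.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Fetch the deployed version of the Agent by ID.
+            response = await client.agents.with_raw_response.get(id="ag_1234abcd")
+            print(response.data.path)
+
+
+        asyncio.run(main())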
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def delete(
+        self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[None]:
+        """
+        Delete the Agent with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[None]
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def move(
+        self,
+        id: str,
+        *,
+        path: typing.Optional[str] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        directory_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentResponse]:
+        """
+        Move the Agent to a different path or change the name.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move Agent to. Starts with `dir_`.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
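+
+        Examples
+        --------
+        An illustrative sketch; assumes the `with_raw_response` accessor on the
+        generated `AsyncHumanloop` client, with placeholder values.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Rename the Agent by moving it to a new path.
+            await client.agents.with_raw_response.move(
+                id="ag_1234abcd",
+                path="New Folder/My Agent",
+            )
+
+
+        asyncio.run(main())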
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="PATCH",
+            json={
+                "path": path,
+                "name": name,
+                "directory_id": directory_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def list_versions(
+        self,
+        id: str,
+        *,
+        evaluator_aggregates: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ListAgents]:
+        """
+        Get a list of all the versions of an Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[ListAgents]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/versions",
+            method="GET",
+            params={
+                "evaluator_aggregates": evaluator_aggregates,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ListAgents,
+                    construct_type(
+                        type_=ListAgents,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[AgentResponse]:
+        """
+        Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This Agent
+        will be used for calls made to the Agent in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
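+
+        Examples
+        --------
+        An illustrative sketch; assumes the `with_raw_response` accessor on the
+        generated `AsyncHumanloop` client, with placeholder IDs.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Deploy a specific Agent version to an Environment.
+            await client.agents.with_raw_response.set_deployment(
+                id="ag_1234abcd",
+                environment_id="env_1234abcd",
+                version_id="agv_1234abcd",
+            )
+
+
+        asyncio.run(main())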
+ + Returns + ------- + AsyncHttpResponse[AgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
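+
+        Examples
+        --------
+        An illustrative sketch; assumes the `with_raw_response` accessor on the
+        generated `AsyncHumanloop` client, with a placeholder Agent ID.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+        async def main() -> None:
+            # Inspect which version is deployed to each Environment.
+            response = await client.agents.with_raw_response.list_environments(id="ag_1234abcd")
+            for environment in response.data:
+                print(environment)
+
+
+        asyncio.run(main())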
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[AgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[str]: + """ + Serialize an Agent to the .agent file format. + + Useful for storing the Agent with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        AsyncHttpResponse[str]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/serialize",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=_response.text)  # type: ignore
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def deserialize(
+        self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[AgentKernelRequest]:
+        """
+        Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent.
+        This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        agent : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentKernelRequest]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "agents/deserialize",
+            method="POST",
+            json={
+                "agent": agent,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentKernelRequest,
+                    construct_type(
+                        type_=AgentKernelRequest,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
new file mode 100644
index 00000000..e02cfc67
--- /dev/null
+++ b/src/humanloop/agents/requests/__init__.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
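Taken together, `serialize` and `deserialize` support an edit-outside-the-app loop. A minimal round-trip sketch follows; the agent id and the edit step are illustrative placeholders, and it assumes the high-level async client mirrors the raw-client methods shown above:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Fetch the deployed version in the .agent file format. The raw client
    # above returns AsyncHttpResponse[str]; read `.data` for the text there.
    serialized = await client.agents.serialize(id="ag_XXXXXXXX")  # placeholder id
    # ... edit the .agent text with your editor or an AI tool ...
    # Convert the edited text back into the version-defining attributes.
    kernel = await client.agents.deserialize(agent=serialized)
    print(kernel)


asyncio.run(main())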
+ +# isort: skip_file + +from .agent_log_request_agent import AgentLogRequestAgentParams +from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams +from .agent_request_stop import AgentRequestStopParams +from .agent_request_template import AgentRequestTemplateParams +from .agent_request_tools_item import AgentRequestToolsItemParams +from .agents_call_request_agent import AgentsCallRequestAgentParams +from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams +from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams + +__all__ = [ + "AgentLogRequestAgentParams", + "AgentLogRequestToolChoiceParams", + "AgentRequestReasoningEffortParams", + "AgentRequestStopParams", + "AgentRequestTemplateParams", + "AgentRequestToolsItemParams", + "AgentsCallRequestAgentParams", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestAgentParams", + "AgentsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py new file mode 100644 index 00000000..38a5adc4 --- /dev/null +++ b/src/humanloop/agents/requests/agent_log_request_agent.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.agent_kernel_request import AgentKernelRequestParams + +AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str] diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py new file mode 100644 index 00000000..02255e30 --- /dev/null +++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.tool_choice import ToolChoiceParams + +AgentLogRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py new file mode 100644 index 00000000..dfc8de95 --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py new file mode 100644 index 00000000..3970451c --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py new file mode 100644 index 00000000..3b9c8c1f --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
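These request-side aliases mean callers can pass either a bare literal or a structured object. A sketch, assuming an OpenAI-style shape for `ToolChoiceParams` (an assumption; check `requests/tool_choice.py` for the actual fields):

from humanloop.agents.requests import (
    AgentRequestStopParams,
    AgentsCallRequestToolChoiceParams,
)

# Literal string form: let the model decide whether to call a tool.
choice_auto: AgentsCallRequestToolChoiceParams = "auto"

# Structured form; this dict shape is an assumption based on OpenAI-style
# tool choice, not something confirmed by the aliases above.
choice_forced: AgentsCallRequestToolChoiceParams = {
    "type": "function",
    "function": {"name": "get_weather"},
}

# Stop sequences accept a single string or any Sequence[str], so a tuple works.
stop: AgentRequestStopParams = ("END", "STOP")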
+ +import typing + +from ...requests.chat_message import ChatMessageParams + +AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py new file mode 100644 index 00000000..3bf06108 --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_tools_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.agent_inline_tool import AgentInlineToolParams +from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams + +AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py new file mode 100644 index 00000000..0123488f --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_request_agent.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.agent_kernel_request import AgentKernelRequestParams + +AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str] diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py new file mode 100644 index 00000000..9ebb0f75 --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.tool_choice import ToolChoiceParams + +AgentsCallRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py new file mode 100644 index 00000000..eab2c55c --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.agent_kernel_request import AgentKernelRequestParams + +AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str] diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py new file mode 100644 index 00000000..40ad08c2 --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.tool_choice import ToolChoiceParams + +AgentsCallStreamRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py new file mode 100644 index 00000000..0d9bf871 --- /dev/null +++ b/src/humanloop/agents/types/__init__.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. 
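The template union likewise accepts either a raw string or a sequence of chat messages. A small sketch, assuming `ChatMessageParams` carries the conventional `role`/`content` fields (see `requests/chat_message.py` for the real shape):

from humanloop.agents.requests import AgentRequestTemplateParams

# Plain-string template.
template_str: AgentRequestTemplateParams = "You are a helpful assistant."

# Message-list template; role/content is an assumed shape here.
template_msgs: AgentRequestTemplateParams = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "{{question}}"},
]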
+ +# isort: skip_file + +from .agent_log_request_agent import AgentLogRequestAgent +from .agent_log_request_tool_choice import AgentLogRequestToolChoice +from .agent_request_reasoning_effort import AgentRequestReasoningEffort +from .agent_request_stop import AgentRequestStop +from .agent_request_template import AgentRequestTemplate +from .agent_request_tools_item import AgentRequestToolsItem +from .agents_call_request_agent import AgentsCallRequestAgent +from .agents_call_request_tool_choice import AgentsCallRequestToolChoice +from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent +from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice + +__all__ = [ + "AgentLogRequestAgent", + "AgentLogRequestToolChoice", + "AgentRequestReasoningEffort", + "AgentRequestStop", + "AgentRequestTemplate", + "AgentRequestToolsItem", + "AgentsCallRequestAgent", + "AgentsCallRequestToolChoice", + "AgentsCallStreamRequestAgent", + "AgentsCallStreamRequestToolChoice", +] diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py new file mode 100644 index 00000000..b0e52d93 --- /dev/null +++ b/src/humanloop/agents/types/agent_log_request_agent.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.agent_kernel_request import AgentKernelRequest + +AgentLogRequestAgent = typing.Union[AgentKernelRequest, str] diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py new file mode 100644 index 00000000..b1d79f3a --- /dev/null +++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.tool_choice import ToolChoice + +AgentLogRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py new file mode 100644 index 00000000..3af67155 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py new file mode 100644 index 00000000..325a6b2e --- /dev/null +++ b/src/humanloop/agents/types/agent_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py new file mode 100644 index 00000000..c4da3e69 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
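On the response side the same unions use Pydantic models rather than TypedDicts, so `isinstance` is enough to narrow them at runtime. A sketch (the `model` attribute on `AgentKernelRequest` is inferred from the serialize docstring above, so treat it as an assumption):

from humanloop.agents.types import AgentLogRequestAgent


def describe(agent: AgentLogRequestAgent) -> str:
    # The union is AgentKernelRequest | str, where the string is a version id.
    if isinstance(agent, str):
        return f"references agent version {agent}"
    return f"inline agent kernel (model={agent.model})"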
+ +import typing + +from ...types.chat_message import ChatMessage + +AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py new file mode 100644 index 00000000..a43d160e --- /dev/null +++ b/src/humanloop/agents/types/agent_request_tools_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.agent_inline_tool import AgentInlineTool +from ...types.agent_linked_file_request import AgentLinkedFileRequest + +AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py new file mode 100644 index 00000000..5cfbc669 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_request_agent.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.agent_kernel_request import AgentKernelRequest + +AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str] diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py new file mode 100644 index 00000000..aee291c9 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.tool_choice import ToolChoice + +AgentsCallRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py new file mode 100644 index 00000000..c803d804 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.agent_kernel_request import AgentKernelRequest + +AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str] diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py new file mode 100644 index 00000000..9e636efa --- /dev/null +++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.tool_choice import ToolChoice + +AgentsCallStreamRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py new file mode 100644 index 00000000..2234d799 --- /dev/null +++ b/src/humanloop/base_client.py @@ -0,0 +1,170 @@ +# This file was auto-generated by Fern from our API Definition. 
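`AgentRequestToolsItem` narrows the same way between a linked Humanloop File and an inline tool definition. A sketch; the import path follows the relative imports shown above but is otherwise an assumption:

from humanloop.agents.types import AgentRequestToolsItem
from humanloop.types.agent_linked_file_request import AgentLinkedFileRequest


def is_linked_tool(tool: AgentRequestToolsItem) -> bool:
    # Union[AgentLinkedFileRequest, AgentInlineTool]: isinstance tells a
    # linked File apart from an inline tool definition.
    return isinstance(tool, AgentLinkedFileRequest)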
+
+import os
+import typing
+
+import httpx
+from .agents.client import AgentsClient, AsyncAgentsClient
+from .core.api_error import ApiError
+from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from .datasets.client import AsyncDatasetsClient, DatasetsClient
+from .directories.client import AsyncDirectoriesClient, DirectoriesClient
+from .environment import HumanloopEnvironment
+from .evaluations.client import AsyncEvaluationsClient, EvaluationsClient
+from .evaluators.client import AsyncEvaluatorsClient, EvaluatorsClient
+from .files.client import AsyncFilesClient, FilesClient
+from .flows.client import AsyncFlowsClient, FlowsClient
+from .logs.client import AsyncLogsClient, LogsClient
+from .prompts.client import AsyncPromptsClient, PromptsClient
+from .tools.client import AsyncToolsClient, ToolsClient
+
+
+class BaseHumanloop:
+    """
+    Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configurations that will propagate to these functions.
+
+    Parameters
+    ----------
+    base_url : typing.Optional[str]
+        The base url to use for requests from the client.
+
+    environment : HumanloopEnvironment
+        The environment to use for requests from the client. Defaults to HumanloopEnvironment.DEFAULT;
+        the enum can be imported with `from .environment import HumanloopEnvironment`.
+
+    api_key : typing.Optional[str]
+        The API key to authenticate with. Defaults to the HUMANLOOP_API_KEY environment variable.
+
+    timeout : typing.Optional[float]
+        The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced.
+
+    follow_redirects : typing.Optional[bool]
+        Whether the default httpx client follows redirects or not; this is irrelevant if a custom httpx client is passed in.
+
+    httpx_client : typing.Optional[httpx.Client]
+        The httpx client to use for making requests. A preconfigured client is used by default; pass one in should you want any custom httpx configuration.
+
+    Examples
+    --------
+    from humanloop import Humanloop
+
+    client = Humanloop(api_key="YOUR_API_KEY")
+    """
+
+    def __init__(
+        self,
+        *,
+        base_url: typing.Optional[str] = None,
+        environment: HumanloopEnvironment = HumanloopEnvironment.DEFAULT,
+        api_key: typing.Optional[str] = os.getenv("HUMANLOOP_API_KEY"),
+        timeout: typing.Optional[float] = None,
+        follow_redirects: typing.Optional[bool] = True,
+        httpx_client: typing.Optional[httpx.Client] = None,
+    ):
+        _defaulted_timeout = (
+            timeout if timeout is not None else 60 if httpx_client is None else httpx_client.timeout.read
+        )
+        if api_key is None:
+            raise ApiError(
+                body="The client must be instantiated by either passing in api_key or setting the HUMANLOOP_API_KEY environment variable"
+            )
+        self._client_wrapper = SyncClientWrapper(
+            base_url=_get_base_url(base_url=base_url, environment=environment),
+            api_key=api_key,
+            httpx_client=httpx_client
+            if httpx_client is not None
+            else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+            if follow_redirects is not None
+            else httpx.Client(timeout=_defaulted_timeout),
+            timeout=_defaulted_timeout,
+        )
+        self.prompts = PromptsClient(client_wrapper=self._client_wrapper)
+        self.tools = ToolsClient(client_wrapper=self._client_wrapper)
+        self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
+        self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper)
+        self.flows = FlowsClient(client_wrapper=self._client_wrapper)
+        self.agents = AgentsClient(client_wrapper=self._client_wrapper)
+        self.directories = DirectoriesClient(client_wrapper=self._client_wrapper)
+        self.files = FilesClient(client_wrapper=self._client_wrapper)
+        self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper)
+        self.logs = LogsClient(client_wrapper=self._client_wrapper)
+
+
+class AsyncBaseHumanloop:
+    """
+    Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configurations that will propagate to these functions.
+
+    Parameters
+    ----------
+    base_url : typing.Optional[str]
+        The base url to use for requests from the client.
+
+    environment : HumanloopEnvironment
+        The environment to use for requests from the client. Defaults to HumanloopEnvironment.DEFAULT;
+        the enum can be imported with `from .environment import HumanloopEnvironment`.
+
+    api_key : typing.Optional[str]
+        The API key to authenticate with. Defaults to the HUMANLOOP_API_KEY environment variable.
+
+    timeout : typing.Optional[float]
+        The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced.
+
+    follow_redirects : typing.Optional[bool]
+        Whether the default httpx client follows redirects or not; this is irrelevant if a custom httpx client is passed in.
+
+    httpx_client : typing.Optional[httpx.AsyncClient]
+        The httpx client to use for making requests. A preconfigured client is used by default; pass one in should you want any custom httpx configuration.
+
+    Examples
+    --------
+    from humanloop import AsyncHumanloop
+
+    client = AsyncHumanloop(api_key="YOUR_API_KEY")
+    """
+
+    def __init__(
+        self,
+        *,
+        base_url: typing.Optional[str] = None,
+        environment: HumanloopEnvironment = HumanloopEnvironment.DEFAULT,
+        api_key: typing.Optional[str] = os.getenv("HUMANLOOP_API_KEY"),
+        timeout: typing.Optional[float] = None,
+        follow_redirects: typing.Optional[bool] = True,
+        httpx_client: typing.Optional[httpx.AsyncClient] = None,
+    ):
+        _defaulted_timeout = (
+            timeout if timeout is not None else 60 if httpx_client is None else httpx_client.timeout.read
+        )
+        if api_key is None:
+            raise ApiError(
+                body="The client must be instantiated by either passing in api_key or setting the HUMANLOOP_API_KEY environment variable"
+            )
+        self._client_wrapper = AsyncClientWrapper(
+            base_url=_get_base_url(base_url=base_url, environment=environment),
+            api_key=api_key,
+            httpx_client=httpx_client
+            if httpx_client is not None
+            else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+            if follow_redirects is not None
+            else httpx.AsyncClient(timeout=_defaulted_timeout),
+            timeout=_defaulted_timeout,
+        )
+        self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper)
+        self.tools = AsyncToolsClient(client_wrapper=self._client_wrapper)
+        self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
+        self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper)
+        self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper)
+        self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
+        self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper)
+        self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
+        self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper)
+        self.logs = AsyncLogsClient(client_wrapper=self._client_wrapper)
+
+
+def _get_base_url(*, base_url: typing.Optional[str] = None, environment: HumanloopEnvironment) -> str:
+    if base_url is not None:
+        return base_url
+    elif environment is not None:
+        return environment.value
+    else:
+        raise Exception("Please pass in either base_url or environment to construct the client")
diff --git a/src/humanloop/core/__init__.py b/src/humanloop/core/__init__.py
new file mode 100644
index 00000000..48f3afaa
--- /dev/null
+++ b/src/humanloop/core/__init__.py
@@ -0,0 +1,59 @@
+# This file was auto-generated by Fern from our API Definition.
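Construction in practice, as a sketch: the key falls back to the HUMANLOOP_API_KEY environment variable, and a custom httpx client replaces the default 60-second timeout. The agent id below is a placeholder:

import httpx

from humanloop import Humanloop

client = Humanloop(
    api_key="YOUR_API_KEY",  # or omit and set HUMANLOOP_API_KEY instead
    httpx_client=httpx.Client(timeout=120.0, follow_redirects=True),
)
versions = client.agents.list_versions(id="ag_XXXXXXXX")  # placeholder id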
+ +# isort: skip_file + +from .api_error import ApiError +from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper +from .datetime_utils import serialize_datetime +from .file import File, convert_file_dict_to_httpx_tuples, with_content_type +from .http_client import AsyncHttpClient, HttpClient +from .http_response import AsyncHttpResponse, HttpResponse +from .jsonable_encoder import jsonable_encoder +from .pagination import AsyncPager, SyncPager +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + UniversalBaseModel, + UniversalRootModel, + parse_obj_as, + universal_field_validator, + universal_root_validator, + update_forward_refs, +) +from .query_encoder import encode_query +from .remove_none_from_dict import remove_none_from_dict +from .request_options import RequestOptions +from .serialization import FieldMetadata, convert_and_respect_annotation_metadata +from .unchecked_base_model import UncheckedBaseModel, UnionMetadata, construct_type + +__all__ = [ + "ApiError", + "AsyncClientWrapper", + "AsyncHttpClient", + "AsyncHttpResponse", + "AsyncPager", + "BaseClientWrapper", + "FieldMetadata", + "File", + "HttpClient", + "HttpResponse", + "IS_PYDANTIC_V2", + "RequestOptions", + "SyncClientWrapper", + "SyncPager", + "UncheckedBaseModel", + "UnionMetadata", + "UniversalBaseModel", + "UniversalRootModel", + "construct_type", + "convert_and_respect_annotation_metadata", + "convert_file_dict_to_httpx_tuples", + "encode_query", + "jsonable_encoder", + "parse_obj_as", + "remove_none_from_dict", + "serialize_datetime", + "universal_field_validator", + "universal_root_validator", + "update_forward_refs", + "with_content_type", +] diff --git a/src/humanloop/core/api_error.py b/src/humanloop/core/api_error.py new file mode 100644 index 00000000..6f850a60 --- /dev/null +++ b/src/humanloop/core/api_error.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, Optional + + +class ApiError(Exception): + headers: Optional[Dict[str, str]] + status_code: Optional[int] + body: Any + + def __init__( + self, + *, + headers: Optional[Dict[str, str]] = None, + status_code: Optional[int] = None, + body: Any = None, + ) -> None: + self.headers = headers + self.status_code = status_code + self.body = body + + def __str__(self) -> str: + return f"headers: {self.headers}, status_code: {self.status_code}, body: {self.body}" diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py new file mode 100644 index 00000000..a71ca52c --- /dev/null +++ b/src/humanloop/core/client_wrapper.py @@ -0,0 +1,55 @@ +# This file was auto-generated by Fern from our API Definition. 
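Every generated method funnels failures through these classes: a 422 raises UnprocessableEntityError (which, in Fern-generated SDKs, typically subclasses ApiError), and unparseable responses raise a plain ApiError. A handling sketch with placeholder ids:

from humanloop import Humanloop
from humanloop.core.api_error import ApiError

client = Humanloop(api_key="YOUR_API_KEY")
try:
    client.agents.set_deployment(
        id="ag_XXXXXXXX", environment_id="env_XXXXXXXX", version_id="agv_XXXXXXXX"
    )
except ApiError as e:
    # Catches UnprocessableEntityError too; body holds the parsed
    # HttpValidationError for 422s and raw text otherwise.
    print(e.status_code, e.body)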
+ +import typing + +import httpx +from .http_client import AsyncHttpClient, HttpClient + + +class BaseClientWrapper: + def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None): + self.api_key = api_key + self._base_url = base_url + self._timeout = timeout + + def get_headers(self) -> typing.Dict[str, str]: + headers: typing.Dict[str, str] = { + "User-Agent": "humanloop/0.8.39", + "X-Fern-Language": "Python", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.39", + } + headers["X-API-KEY"] = self.api_key + return headers + + def get_base_url(self) -> str: + return self._base_url + + def get_timeout(self) -> typing.Optional[float]: + return self._timeout + + +class SyncClientWrapper(BaseClientWrapper): + def __init__( + self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.Client + ): + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.httpx_client = HttpClient( + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, + ) + + +class AsyncClientWrapper(BaseClientWrapper): + def __init__( + self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.AsyncClient + ): + super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) + self.httpx_client = AsyncHttpClient( + httpx_client=httpx_client, + base_headers=self.get_headers, + base_timeout=self.get_timeout, + base_url=self.get_base_url, + ) diff --git a/src/humanloop/core/datetime_utils.py b/src/humanloop/core/datetime_utils.py new file mode 100644 index 00000000..7c9864a9 --- /dev/null +++ b/src/humanloop/core/datetime_utils.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + + +def serialize_datetime(v: dt.datetime) -> str: + """ + Serialize a datetime including timezone info. + + Uses the timezone info provided if present, otherwise uses the current runtime's timezone info. + + UTC datetimes end in "Z" while all other timezones are represented as offset from UTC, e.g. +05:00. + """ + + def _serialize_zoned_datetime(v: dt.datetime) -> str: + if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname(None): + # UTC is a special case where we use "Z" at the end instead of "+00:00" + return v.isoformat().replace("+00:00", "Z") + else: + # Delegate to the typical +/- offset format + return v.isoformat() + + if v.tzinfo is not None: + return _serialize_zoned_datetime(v) + else: + local_tz = dt.datetime.now().astimezone().tzinfo + localized_dt = v.replace(tzinfo=local_tz) + return _serialize_zoned_datetime(localized_dt) diff --git a/src/humanloop/core/file.py b/src/humanloop/core/file.py new file mode 100644 index 00000000..44b0d27c --- /dev/null +++ b/src/humanloop/core/file.py @@ -0,0 +1,67 @@ +# This file was auto-generated by Fern from our API Definition. 
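The three timezone cases serialize_datetime distinguishes, illustrated with stdlib datetimes:

import datetime as dt

from humanloop.core.datetime_utils import serialize_datetime

utc = dt.datetime(2025, 5, 15, 15, 39, 33, tzinfo=dt.timezone.utc)
est = dt.datetime(2025, 5, 15, 10, 39, 33, tzinfo=dt.timezone(dt.timedelta(hours=-5)))
naive = dt.datetime(2025, 5, 15, 15, 39, 33)

print(serialize_datetime(utc))    # 2025-05-15T15:39:33Z
print(serialize_datetime(est))    # 2025-05-15T10:39:33-05:00
print(serialize_datetime(naive))  # naive values pick up the runtime's local offset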
+
+from typing import IO, Dict, List, Mapping, Optional, Tuple, Union, cast
+
+# File typing inspired by the flexibility of types within the httpx library
+# https://github.com/encode/httpx/blob/master/httpx/_types.py
+FileContent = Union[IO[bytes], bytes, str]
+File = Union[
+    # file (or bytes)
+    FileContent,
+    # (filename, file (or bytes))
+    Tuple[Optional[str], FileContent],
+    # (filename, file (or bytes), content_type)
+    Tuple[Optional[str], FileContent, Optional[str]],
+    # (filename, file (or bytes), content_type, headers)
+    Tuple[
+        Optional[str],
+        FileContent,
+        Optional[str],
+        Mapping[str, str],
+    ],
+]
+
+
+def convert_file_dict_to_httpx_tuples(
+    d: Dict[str, Union[File, List[File]]],
+) -> List[Tuple[str, File]]:
+    """
+    The format we use is a list of tuples, where the first element is the
+    name of the file and the second is the file object. Typically HTTPX wants
+    a dict, but to be able to send lists of files, you have to use the list
+    approach (which also works for non-lists)
+    https://github.com/encode/httpx/pull/1032
+    """
+
+    httpx_tuples = []
+    for key, file_like in d.items():
+        if isinstance(file_like, list):
+            for file_like_item in file_like:
+                httpx_tuples.append((key, file_like_item))
+        else:
+            httpx_tuples.append((key, file_like))
+    return httpx_tuples
+
+
+def with_content_type(*, file: File, default_content_type: str) -> File:
+    """
+    Resolve the file's content type: use the one provided on the file tuple
+    if present, and fall back to default_content_type otherwise.
+    """
+    if isinstance(file, tuple):
+        if len(file) == 2:
+            filename, content = cast(Tuple[Optional[str], FileContent], file)  # type: ignore
+            return (filename, content, default_content_type)
+        elif len(file) == 3:
+            filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file)  # type: ignore
+            out_content_type = file_content_type or default_content_type
+            return (filename, content, out_content_type)
+        elif len(file) == 4:
+            filename, content, file_content_type, headers = cast(  # type: ignore
+                Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], file
+            )
+            out_content_type = file_content_type or default_content_type
+            return (filename, content, out_content_type, headers)
+        else:
+            raise ValueError(f"Unexpected tuple length: {len(file)}")
+    return (None, file, default_content_type)
diff --git a/src/humanloop/core/http_client.py b/src/humanloop/core/http_client.py
new file mode 100644
index 00000000..e7bd4f79
--- /dev/null
+++ b/src/humanloop/core/http_client.py
@@ -0,0 +1,497 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import asyncio
+import email.utils
+import re
+import time
+import typing
+import urllib.parse
+from contextlib import asynccontextmanager, contextmanager
+from random import random
+
+import httpx
+from .file import File, convert_file_dict_to_httpx_tuples
+from .jsonable_encoder import jsonable_encoder
+from .query_encoder import encode_query
+from .remove_none_from_dict import remove_none_from_dict
+from .request_options import RequestOptions
+
+INITIAL_RETRY_DELAY_SECONDS = 0.5
+MAX_RETRY_DELAY_SECONDS = 10
+MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30
+
+
+def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]:
+    """
+    This function parses the `Retry-After` header in an HTTP response and returns the number of seconds to wait.
+
+    Inspired by the urllib3 retry implementation.
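Continuing the file helpers above, a quick sketch of how the tuple forms and the two helpers interact (pure illustration against the code just shown):

from humanloop.core.file import convert_file_dict_to_httpx_tuples, with_content_type

raw = b"col_a,col_b\n1,2\n"

# Bare bytes gain the default content type (and no filename).
print(with_content_type(file=raw, default_content_type="text/csv"))
# -> (None, b'col_a,col_b\n1,2\n', 'text/csv')

# A 3-tuple that already names a content type keeps it.
print(with_content_type(file=("data.csv", raw, "application/csv"), default_content_type="text/csv"))
# -> ('data.csv', b'col_a,col_b\n1,2\n', 'application/csv')

# Lists of files flatten to repeated (key, file) pairs for httpx.
print(convert_file_dict_to_httpx_tuples({"files": [raw, raw]}))
# -> [('files', b'...'), ('files', b'...')]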
+    """
+    retry_after_ms = response_headers.get("retry-after-ms")
+    if retry_after_ms is not None:
+        try:
+            # Header values arrive as strings, so convert before comparing.
+            retry_after_ms_int = int(retry_after_ms)
+            return retry_after_ms_int / 1000 if retry_after_ms_int > 0 else 0
+        except Exception:
+            pass
+
+    retry_after = response_headers.get("retry-after")
+    if retry_after is None:
+        return None
+
+    # Attempt to parse the header as an int.
+    if re.match(r"^\s*[0-9]+\s*$", retry_after):
+        seconds = float(retry_after)
+    # Fallback to parsing it as a date.
+    else:
+        retry_date_tuple = email.utils.parsedate_tz(retry_after)
+        if retry_date_tuple is None:
+            return None
+        if retry_date_tuple[9] is None:  # Python 2
+            # Assume UTC if no timezone was specified
+            # On Python2.7, parsedate_tz returns None for a timezone offset
+            # instead of 0 if no timezone is given, where mktime_tz treats
+            # a None timezone offset as local time.
+            retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
+
+        retry_date = email.utils.mktime_tz(retry_date_tuple)
+        seconds = retry_date - time.time()
+
+    if seconds < 0:
+        seconds = 0
+
+    return seconds
+
+
+def _retry_timeout(response: httpx.Response, retries: int) -> float:
+    """
+    Determine the amount of time to wait before retrying a request.
+    This function begins by trying to parse a retry-after header from the response, and then proceeds to use exponential backoff
+    with a jitter to determine the number of seconds to wait.
+    """
+
+    # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
+    retry_after = _parse_retry_after(response.headers)
+    if retry_after is not None and retry_after <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER:
+        return retry_after
+
+    # Apply exponential backoff, capped at MAX_RETRY_DELAY_SECONDS.
+    retry_delay = min(INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS)
+
+    # Add a randomness / jitter to the retry delay to avoid overwhelming the server with retries.
+ timeout = retry_delay * (1 - 0.25 * random()) + return timeout if timeout >= 0 else 0 + + +def _should_retry(response: httpx.Response) -> bool: + retryable_400s = [429, 408, 409] + return response.status_code >= 500 or response.status_code in retryable_400s + + +def remove_omit_from_dict( + original: typing.Dict[str, typing.Optional[typing.Any]], + omit: typing.Optional[typing.Any], +) -> typing.Dict[str, typing.Any]: + if omit is None: + return original + new: typing.Dict[str, typing.Any] = {} + for key, value in original.items(): + if value is not omit: + new[key] = value + return new + + +def maybe_filter_request_body( + data: typing.Optional[typing.Any], + request_options: typing.Optional[RequestOptions], + omit: typing.Optional[typing.Any], +) -> typing.Optional[typing.Any]: + if data is None: + return ( + jsonable_encoder(request_options.get("additional_body_parameters", {})) or {} + if request_options is not None + else None + ) + elif not isinstance(data, typing.Mapping): + data_content = jsonable_encoder(data) + else: + data_content = { + **(jsonable_encoder(remove_omit_from_dict(data, omit))), # type: ignore + **( + jsonable_encoder(request_options.get("additional_body_parameters", {})) or {} + if request_options is not None + else {} + ), + } + return data_content + + +# Abstracted out for testing purposes +def get_request_body( + *, + json: typing.Optional[typing.Any], + data: typing.Optional[typing.Any], + request_options: typing.Optional[RequestOptions], + omit: typing.Optional[typing.Any], +) -> typing.Tuple[typing.Optional[typing.Any], typing.Optional[typing.Any]]: + json_body = None + data_body = None + if data is not None: + data_body = maybe_filter_request_body(data, request_options, omit) + else: + # If both data and json are None, we send json data in the event extra properties are specified + json_body = maybe_filter_request_body(json, request_options, omit) + + # If you have an empty JSON body, you should just send None + return (json_body if json_body != {} else None), data_body if data_body != {} else None + + +class HttpClient: + def __init__( + self, + *, + httpx_client: httpx.Client, + base_timeout: typing.Callable[[], typing.Optional[float]], + base_headers: typing.Callable[[], typing.Dict[str, str]], + base_url: typing.Optional[typing.Callable[[], str]] = None, + ): + self.base_url = base_url + self.base_timeout = base_timeout + self.base_headers = base_headers + self.httpx_client = httpx_client + + def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: + base_url = maybe_base_url + if self.base_url is not None and base_url is None: + base_url = self.base_url() + + if base_url is None: + raise ValueError("A base_url is required to make this request, please provide one and try again.") + return base_url + + def request( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, + files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 2, + omit: typing.Optional[typing.Any] = None, + ) -> httpx.Response: + base_url = self.get_base_url(base_url) + timeout = ( 
+ request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + + response = self.httpx_client.request( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) or {} + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if (files is not None and files is not omit) + else None + ), + timeout=timeout, + ) + + max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0 + if _should_retry(response=response): + if max_retries > retries: + time.sleep(_retry_timeout(response=response, retries=retries)) + return self.request( + path=path, + method=method, + base_url=base_url, + params=params, + json=json, + content=content, + files=files, + headers=headers, + request_options=request_options, + retries=retries + 1, + omit=omit, + ) + + return response + + @contextmanager + def stream( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, + files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 2, + omit: typing.Optional[typing.Any] = None, + ) -> typing.Iterator[httpx.Response]: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + + with self.httpx_client.stream( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if (files is not None and files is not omit) + else None + ), + 
timeout=timeout, + ) as stream: + yield stream + + +class AsyncHttpClient: + def __init__( + self, + *, + httpx_client: httpx.AsyncClient, + base_timeout: typing.Callable[[], typing.Optional[float]], + base_headers: typing.Callable[[], typing.Dict[str, str]], + base_url: typing.Optional[typing.Callable[[], str]] = None, + ): + self.base_url = base_url + self.base_timeout = base_timeout + self.base_headers = base_headers + self.httpx_client = httpx_client + + def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: + base_url = maybe_base_url + if self.base_url is not None and base_url is None: + base_url = self.base_url() + + if base_url is None: + raise ValueError("A base_url is required to make this request, please provide one and try again.") + return base_url + + async def request( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, + files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 2, + omit: typing.Optional[typing.Any] = None, + ) -> httpx.Response: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + + # Add the input to each of these and do None-safety checks + response = await self.httpx_client.request( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) or {} + if request_options is not None + else {} + ), + }, + omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if files is not None + else None + ), + timeout=timeout, + ) + + max_retries: int = request_options.get("max_retries", 0) if request_options is not None else 0 + if _should_retry(response=response): + if max_retries > retries: + await asyncio.sleep(_retry_timeout(response=response, retries=retries)) + return await self.request( + path=path, + method=method, + base_url=base_url, + params=params, + json=json, + content=content, + files=files, + headers=headers, + request_options=request_options, + retries=retries + 1, + omit=omit, + ) + return response + + @asynccontextmanager + async def stream( + self, + path: typing.Optional[str] = None, + *, + method: str, + base_url: typing.Optional[str] = None, + params: typing.Optional[typing.Dict[str, typing.Any]] = None, + json: typing.Optional[typing.Any] = None, + data: typing.Optional[typing.Any] = None, + 
content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None, + files: typing.Optional[typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]]] = None, + headers: typing.Optional[typing.Dict[str, typing.Any]] = None, + request_options: typing.Optional[RequestOptions] = None, + retries: int = 2, + omit: typing.Optional[typing.Any] = None, + ) -> typing.AsyncIterator[httpx.Response]: + base_url = self.get_base_url(base_url) + timeout = ( + request_options.get("timeout_in_seconds") + if request_options is not None and request_options.get("timeout_in_seconds") is not None + else self.base_timeout() + ) + + json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit) + + async with self.httpx_client.stream( + method=method, + url=urllib.parse.urljoin(f"{base_url}/", path), + headers=jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **(request_options.get("additional_headers", {}) if request_options is not None else {}), + } + ) + ), + params=encode_query( + jsonable_encoder( + remove_none_from_dict( + remove_omit_from_dict( + { + **(params if params is not None else {}), + **( + request_options.get("additional_query_parameters", {}) + if request_options is not None + else {} + ), + }, + omit=omit, + ) + ) + ) + ), + json=json_body, + data=data_body, + content=content, + files=( + convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit)) + if files is not None + else None + ), + timeout=timeout, + ) as stream: + yield stream diff --git a/src/humanloop/core/http_response.py b/src/humanloop/core/http_response.py new file mode 100644 index 00000000..48a1798a --- /dev/null +++ b/src/humanloop/core/http_response.py @@ -0,0 +1,55 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Dict, Generic, TypeVar + +import httpx + +T = TypeVar("T") +"""Generic to represent the underlying type of the data wrapped by the HTTP response.""" + + +class BaseHttpResponse: + """Minimalist HTTP response wrapper that exposes response headers.""" + + _response: httpx.Response + + def __init__(self, response: httpx.Response): + self._response = response + + @property + def headers(self) -> Dict[str, str]: + return dict(self._response.headers) + + +class HttpResponse(Generic[T], BaseHttpResponse): + """HTTP response wrapper that exposes response headers and data.""" + + _data: T + + def __init__(self, response: httpx.Response, data: T): + super().__init__(response) + self._data = data + + @property + def data(self) -> T: + return self._data + + def close(self) -> None: + self._response.close() + + +class AsyncHttpResponse(Generic[T], BaseHttpResponse): + """HTTP response wrapper that exposes response headers and data.""" + + _data: T + + def __init__(self, response: httpx.Response, data: T): + super().__init__(response) + self._data = data + + @property + def data(self) -> T: + return self._data + + async def close(self) -> None: + await self._response.aclose() diff --git a/src/humanloop/core/jsonable_encoder.py b/src/humanloop/core/jsonable_encoder.py new file mode 100644 index 00000000..afee3662 --- /dev/null +++ b/src/humanloop/core/jsonable_encoder.py @@ -0,0 +1,100 @@ +# This file was auto-generated by Fern from our API Definition. + +""" +jsonable_encoder converts a Python object to a JSON-friendly dict +(e.g. datetimes to strings, Pydantic models to dicts). 
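Pulling the HTTP-layer knobs together: RequestOptions is consulted for timeouts, retries, and the additional_* escape hatches used throughout the request building above. Retries back off exponentially from 0.5s (capped at 10s, jittered) and honour Retry-After headers up to 30s. A usage sketch with a placeholder id and an illustrative trace header:

from humanloop import Humanloop
from humanloop.core.request_options import RequestOptions

client = Humanloop(api_key="YOUR_API_KEY")
options: RequestOptions = {
    "timeout_in_seconds": 30,
    "max_retries": 3,  # retried on 408/409/429 and 5xx responses
    "additional_headers": {"X-Trace-Id": "abc123"},
}
client.agents.list_environments(id="ag_XXXXXXXX", request_options=options)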
+ +Taken from FastAPI, and made a bit simpler +https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py +""" + +import base64 +import dataclasses +import datetime as dt +from enum import Enum +from pathlib import PurePath +from types import GeneratorType +from typing import Any, Callable, Dict, List, Optional, Set, Union + +import pydantic +from .datetime_utils import serialize_datetime +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + encode_by_type, + to_jsonable_with_fallback, +) + +SetIntStr = Set[Union[int, str]] +DictIntStrAny = Dict[Union[int, str], Any] + + +def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any: + custom_encoder = custom_encoder or {} + if custom_encoder: + if type(obj) in custom_encoder: + return custom_encoder[type(obj)](obj) + else: + for encoder_type, encoder_instance in custom_encoder.items(): + if isinstance(obj, encoder_type): + return encoder_instance(obj) + if isinstance(obj, pydantic.BaseModel): + if IS_PYDANTIC_V2: + encoder = getattr(obj.model_config, "json_encoders", {}) # type: ignore # Pydantic v2 + else: + encoder = getattr(obj.__config__, "json_encoders", {}) # type: ignore # Pydantic v1 + if custom_encoder: + encoder.update(custom_encoder) + obj_dict = obj.dict(by_alias=True) + if "__root__" in obj_dict: + obj_dict = obj_dict["__root__"] + if "root" in obj_dict: + obj_dict = obj_dict["root"] + return jsonable_encoder(obj_dict, custom_encoder=encoder) + if dataclasses.is_dataclass(obj): + obj_dict = dataclasses.asdict(obj) # type: ignore + return jsonable_encoder(obj_dict, custom_encoder=custom_encoder) + if isinstance(obj, bytes): + return base64.b64encode(obj).decode("utf-8") + if isinstance(obj, Enum): + return obj.value + if isinstance(obj, PurePath): + return str(obj) + if isinstance(obj, (str, int, float, type(None))): + return obj + if isinstance(obj, dt.datetime): + return serialize_datetime(obj) + if isinstance(obj, dt.date): + return str(obj) + if isinstance(obj, dict): + encoded_dict = {} + allowed_keys = set(obj.keys()) + for key, value in obj.items(): + if key in allowed_keys: + encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder) + encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder) + encoded_dict[encoded_key] = encoded_value + return encoded_dict + if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): + encoded_list = [] + for item in obj: + encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder)) + return encoded_list + + def fallback_serializer(o: Any) -> Any: + attempt_encode = encode_by_type(o) + if attempt_encode is not None: + return attempt_encode + + try: + data = dict(o) + except Exception as e: + errors: List[Exception] = [] + errors.append(e) + try: + data = vars(o) + except Exception as e: + errors.append(e) + raise ValueError(errors) from e + return jsonable_encoder(data, custom_encoder=custom_encoder) + + return to_jsonable_with_fallback(obj, fallback_serializer) diff --git a/src/humanloop/core/pagination.py b/src/humanloop/core/pagination.py new file mode 100644 index 00000000..209a1ff1 --- /dev/null +++ b/src/humanloop/core/pagination.py @@ -0,0 +1,82 @@ +# This file was auto-generated by Fern from our API Definition. 
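A few of jsonable_encoder's branches, illustrated against the code just shown:

import datetime as dt
from enum import Enum
from pathlib import PurePosixPath

from humanloop.core.jsonable_encoder import jsonable_encoder


class Color(Enum):
    RED = "red"


print(jsonable_encoder({
    "when": dt.datetime(2025, 5, 15, tzinfo=dt.timezone.utc),  # -> "2025-05-15T00:00:00Z"
    "color": Color.RED,                                        # -> "red"
    "path": PurePosixPath("a/b.txt"),                          # -> "a/b.txt"
    "blob": b"hi",                                             # -> "aGk=" (base64)
}))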
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import AsyncIterator, Awaitable, Callable, Generic, Iterator, List, Optional, TypeVar
+
+from .http_response import BaseHttpResponse
+
+T = TypeVar("T")
+"""Generic to represent the underlying type of the results within a page"""
+
+
+# SDKs implement a Page ABC per-pagination request; the endpoint then returns a pager that wraps this type.
+# For example, an endpoint will return SyncPager[UserPage] where UserPage implements the Page ABC. ex:
+#
+# SyncPager(
+#     has_next=response.list_metadata.after is not None,
+#     items=response.data,
+#     # This should be the outer function that returns the SyncPager again
+#     get_next=lambda: list(..., cursor: response.cursor) (or list(..., offset: offset + 1))
+# )
+
+
+@dataclass(frozen=True)
+class SyncPager(Generic[T]):
+    get_next: Optional[Callable[[], Optional[SyncPager[T]]]]
+    has_next: bool
+    items: Optional[List[T]]
+    response: Optional[BaseHttpResponse]
+
+    # Here we type ignore the iterator to avoid a mypy error
+    # caused by the type conflict with Pydantic's __iter__ method
+    # brought in by extending the base model
+    def __iter__(self) -> Iterator[T]:  # type: ignore[override]
+        for page in self.iter_pages():
+            if page.items is not None:
+                yield from page.items
+
+    def iter_pages(self) -> Iterator[SyncPager[T]]:
+        page: Optional[SyncPager[T]] = self
+        while page is not None:
+            yield page
+
+            if not page.has_next or page.get_next is None:
+                return
+
+            page = page.get_next()
+            if page is None or page.items is None or len(page.items) == 0:
+                return
+
+    def next_page(self) -> Optional[SyncPager[T]]:
+        return self.get_next() if self.get_next is not None else None
+
+
+@dataclass(frozen=True)
+class AsyncPager(Generic[T]):
+    get_next: Optional[Callable[[], Awaitable[Optional[AsyncPager[T]]]]]
+    has_next: bool
+    items: Optional[List[T]]
+    response: Optional[BaseHttpResponse]
+
+    async def __aiter__(self) -> AsyncIterator[T]:
+        async for page in self.iter_pages():
+            if page.items is not None:
+                for item in page.items:
+                    yield item
+
+    async def iter_pages(self) -> AsyncIterator[AsyncPager[T]]:
+        page: Optional[AsyncPager[T]] = self
+        while page is not None:
+            yield page
+
+            if not page.has_next or page.get_next is None:
+                return
+
+            page = await page.get_next()
+            if page is None or page.items is None or len(page.items) == 0:
+                return
+
+    async def next_page(self) -> Optional[AsyncPager[T]]:
+        return await self.get_next() if self.get_next is not None else None
diff --git a/src/humanloop/core/pydantic_utilities.py b/src/humanloop/core/pydantic_utilities.py
new file mode 100644
index 00000000..0360ef49
--- /dev/null
+++ b/src/humanloop/core/pydantic_utilities.py
@@ -0,0 +1,255 @@
+# This file was auto-generated by Fern from our API Definition.
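Consuming a pager, sketched against a hypothetical paginated endpoint; the `client.logs.list` call and its arguments are assumptions for illustration, not taken from this diff:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
pager = client.logs.list(file_id="pr_XXXXXXXX", size=50)  # hypothetical signature

# Item-level iteration flattens every page transparently.
for log in pager:
    print(log)

# Page-level iteration exposes has_next/items/response per page.
for page in pager.iter_pages():
    print(page.has_next, len(page.items or []))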
+ +# nopycln: file +import datetime as dt +from collections import defaultdict +from typing import Any, Callable, ClassVar, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast + +import pydantic + +IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + +if IS_PYDANTIC_V2: + from pydantic.v1.datetime_parse import parse_date as parse_date + from pydantic.v1.datetime_parse import parse_datetime as parse_datetime + from pydantic.v1.fields import ModelField as ModelField + from pydantic.v1.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[attr-defined] + from pydantic.v1.typing import get_args as get_args + from pydantic.v1.typing import get_origin as get_origin + from pydantic.v1.typing import is_literal_type as is_literal_type + from pydantic.v1.typing import is_union as is_union +else: + from pydantic.datetime_parse import parse_date as parse_date # type: ignore[no-redef] + from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore[no-redef] + from pydantic.fields import ModelField as ModelField # type: ignore[attr-defined, no-redef] + from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[no-redef] + from pydantic.typing import get_args as get_args # type: ignore[no-redef] + from pydantic.typing import get_origin as get_origin # type: ignore[no-redef] + from pydantic.typing import is_literal_type as is_literal_type # type: ignore[no-redef] + from pydantic.typing import is_union as is_union # type: ignore[no-redef] + +from .datetime_utils import serialize_datetime +from .serialization import convert_and_respect_annotation_metadata +from typing_extensions import TypeAlias + +T = TypeVar("T") +Model = TypeVar("Model", bound=pydantic.BaseModel) + + +def parse_obj_as(type_: Type[T], object_: Any) -> T: + dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read") + if IS_PYDANTIC_V2: + adapter = pydantic.TypeAdapter(type_) # type: ignore[attr-defined] + return adapter.validate_python(dealiased_object) + return pydantic.parse_obj_as(type_, dealiased_object) + + +def to_jsonable_with_fallback(obj: Any, fallback_serializer: Callable[[Any], Any]) -> Any: + if IS_PYDANTIC_V2: + from pydantic_core import to_jsonable_python + + return to_jsonable_python(obj, fallback=fallback_serializer) + return fallback_serializer(obj) + + +class UniversalBaseModel(pydantic.BaseModel): + if IS_PYDANTIC_V2: + model_config: ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( # type: ignore[typeddict-unknown-key] + # Allow fields beginning with `model_` to be used in the model + protected_namespaces=(), + ) + + @pydantic.model_serializer(mode="wrap", when_used="json") # type: ignore[attr-defined] + def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> Any: # type: ignore[name-defined] + serialized = handler(self) + data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()} + return data + + else: + + class Config: + smart_union = True + json_encoders = {dt.datetime: serialize_datetime} + + @classmethod + def model_construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model": + dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read") + return cls.construct(_fields_set, **dealiased_object) + + @classmethod + def construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model": + dealiased_object = 
convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
+        if IS_PYDANTIC_V2:
+            return super().model_construct(_fields_set, **dealiased_object)  # type: ignore[misc]
+        return super().construct(_fields_set, **dealiased_object)
+
+    def json(self, **kwargs: Any) -> str:
+        kwargs_with_defaults = {
+            "by_alias": True,
+            "exclude_unset": True,
+            **kwargs,
+        }
+        if IS_PYDANTIC_V2:
+            return super().model_dump_json(**kwargs_with_defaults)  # type: ignore[misc]
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: Any) -> Dict[str, Any]:
+        """
+        Override the default dict method to `exclude_unset` by default. This function patches
+        `exclude_unset` to also include fields with non-None default values.
+        """
+        # Note: the logic here is multiplexed given the levers exposed in Pydantic V1 vs V2
+        # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice.
+        #
+        # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
+        # that we have less control over, and this is less intrusive than custom serializers for now.
+        if IS_PYDANTIC_V2:
+            kwargs_with_defaults_exclude_unset = {
+                **kwargs,
+                "by_alias": True,
+                "exclude_unset": True,
+                "exclude_none": False,
+            }
+            kwargs_with_defaults_exclude_none = {
+                **kwargs,
+                "by_alias": True,
+                "exclude_none": True,
+                "exclude_unset": False,
+            }
+            dict_dump = deep_union_pydantic_dicts(
+                super().model_dump(**kwargs_with_defaults_exclude_unset),  # type: ignore[misc]
+                super().model_dump(**kwargs_with_defaults_exclude_none),  # type: ignore[misc]
+            )
+
+        else:
+            _fields_set = self.__fields_set__.copy()
+
+            fields = _get_model_fields(self.__class__)
+            for name, field in fields.items():
+                if name not in _fields_set:
+                    default = _get_field_default(field)
+
+                    # If the default values are non-null act like they've been set
+                    # This effectively allows exclude_unset to work like exclude_none where
+                    # the latter passes through intentionally set none values.
+                    if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]):
+                        _fields_set.add(name)
+
+                        if default is not None:
+                            self.__fields_set__.add(name)
+
+            kwargs_with_defaults_exclude_unset_include_fields = {
+                "by_alias": True,
+                "exclude_unset": True,
+                "include": _fields_set,
+                **kwargs,
+            }
+
+            dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)
+
+        return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")
+
+
+def _union_list_of_pydantic_dicts(source: List[Any], destination: List[Any]) -> List[Any]:
+    converted_list: List[Any] = []
+    for i, item in enumerate(source):
+        destination_value = destination[i]
+        if isinstance(item, dict):
+            converted_list.append(deep_union_pydantic_dicts(item, destination_value))
+        elif isinstance(item, list):
+            converted_list.append(_union_list_of_pydantic_dicts(item, destination_value))
+        else:
+            converted_list.append(item)
+    return converted_list
+
+
+def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[str, Any]) -> Dict[str, Any]:
+    for key, value in source.items():
+        node = destination.setdefault(key, {})
+        if isinstance(value, dict):
+            deep_union_pydantic_dicts(value, node)
+        # Note: we do not do this same processing for sets given we do not have sets of models
+        # and given the sets are unordered, the processing of the set and matching objects would
+        # be non-trivial.
+        elif isinstance(value, list):
+            destination[key] = _union_list_of_pydantic_dicts(value, node)
+        else:
+            destination[key] = value
+
+    return destination
+
+
+if IS_PYDANTIC_V2:
+
+    class V2RootModel(UniversalBaseModel, pydantic.RootModel):  # type: ignore[misc, name-defined, type-arg]
+        pass
+
+    UniversalRootModel: TypeAlias = V2RootModel  # type: ignore[misc]
+else:
+    UniversalRootModel: TypeAlias = UniversalBaseModel  # type: ignore[misc, no-redef]
+
+
+def encode_by_type(o: Any) -> Any:
+    encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(tuple)
+    for type_, encoder in encoders_by_type.items():
+        encoders_by_class_tuples[encoder] += (type_,)
+
+    if type(o) in encoders_by_type:
+        return encoders_by_type[type(o)](o)
+    for encoder, classes_tuple in encoders_by_class_tuples.items():
+        if isinstance(o, classes_tuple):
+            return encoder(o)
+
+
+def update_forward_refs(model: Type["Model"], **localns: Any) -> None:
+    if IS_PYDANTIC_V2:
+        model.model_rebuild(raise_errors=False)  # type: ignore[attr-defined]
+    else:
+        model.update_forward_refs(**localns)
+
+
+# Mirrors Pydantic's internal typing
+AnyCallable = Callable[..., Any]
+
+
+def universal_root_validator(
+    pre: bool = False,
+) -> Callable[[AnyCallable], AnyCallable]:
+    def decorator(func: AnyCallable) -> AnyCallable:
+        if IS_PYDANTIC_V2:
+            return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func))  # type: ignore[attr-defined]
+        return cast(AnyCallable, pydantic.root_validator(pre=pre)(func))  # type: ignore[call-overload]
+
+    return decorator
+
+
+def universal_field_validator(field_name: str, pre: bool = False) -> Callable[[AnyCallable], AnyCallable]:
+    def decorator(func: AnyCallable) -> AnyCallable:
+        if IS_PYDANTIC_V2:
+            return cast(AnyCallable, pydantic.field_validator(field_name, mode="before" if pre else "after")(func))  # type: ignore[attr-defined]
+        return cast(AnyCallable, pydantic.validator(field_name, pre=pre)(func))
+
+    return decorator
+
+
+PydanticField = Union[ModelField, pydantic.fields.FieldInfo]
+
+
+def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]:
+    if IS_PYDANTIC_V2:
+        return cast(Mapping[str, PydanticField], model.model_fields)  # type: ignore[attr-defined]
+    return cast(Mapping[str, PydanticField], model.__fields__)
+
+
+def _get_field_default(field: PydanticField) -> Any:
+    try:
+        value = field.get_default()  # type: ignore[union-attr]
+    except Exception:
+        value = field.default
+    if IS_PYDANTIC_V2:
+        from pydantic_core import PydanticUndefined
+
+        if value is PydanticUndefined:
+            return None
+        return value
+    return value
diff --git a/src/humanloop/core/query_encoder.py b/src/humanloop/core/query_encoder.py
new file mode 100644
index 00000000..3183001d
--- /dev/null
+++ b/src/humanloop/core/query_encoder.py
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
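+# Expected flattening (illustrative, not generated code): nested mappings
+# become bracketed keys, and lists repeat their key per element.
+#
+#     encode_query({"filter": {"name": "test", "meta": {"env": "prod"}}, "ids": [1, 2]})
+#     # -> [("filter[name]", "test"), ("filter[meta][env]", "prod"), ("ids", 1), ("ids", 2)]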
+ +from typing import Any, Dict, List, Optional, Tuple + +import pydantic + + +# Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict +def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> List[Tuple[str, Any]]: + result = [] + for k, v in dict_flat.items(): + key = f"{key_prefix}[{k}]" if key_prefix is not None else k + if isinstance(v, dict): + result.extend(traverse_query_dict(v, key)) + elif isinstance(v, list): + for arr_v in v: + if isinstance(arr_v, dict): + result.extend(traverse_query_dict(arr_v, key)) + else: + result.append((key, arr_v)) + else: + result.append((key, v)) + return result + + +def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]: + if isinstance(query_value, pydantic.BaseModel) or isinstance(query_value, dict): + if isinstance(query_value, pydantic.BaseModel): + obj_dict = query_value.dict(by_alias=True) + else: + obj_dict = query_value + return traverse_query_dict(obj_dict, query_key) + elif isinstance(query_value, list): + encoded_values: List[Tuple[str, Any]] = [] + for value in query_value: + if isinstance(value, pydantic.BaseModel) or isinstance(value, dict): + if isinstance(value, pydantic.BaseModel): + obj_dict = value.dict(by_alias=True) + elif isinstance(value, dict): + obj_dict = value + + encoded_values.extend(single_query_encoder(query_key, obj_dict)) + else: + encoded_values.append((query_key, value)) + + return encoded_values + + return [(query_key, query_value)] + + +def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]: + if query is None: + return None + + encoded_query = [] + for k, v in query.items(): + encoded_query.extend(single_query_encoder(k, v)) + return encoded_query diff --git a/src/humanloop/core/remove_none_from_dict.py b/src/humanloop/core/remove_none_from_dict.py new file mode 100644 index 00000000..c2298143 --- /dev/null +++ b/src/humanloop/core/remove_none_from_dict.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, Mapping, Optional + + +def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]: + new: Dict[str, Any] = {} + for key, value in original.items(): + if value is not None: + new[key] = value + return new diff --git a/src/humanloop/core/request_options.py b/src/humanloop/core/request_options.py new file mode 100644 index 00000000..1b388044 --- /dev/null +++ b/src/humanloop/core/request_options.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +try: + from typing import NotRequired # type: ignore +except ImportError: + from typing_extensions import NotRequired + + +class RequestOptions(typing.TypedDict, total=False): + """ + Additional options for request-specific configuration when calling APIs via the SDK. + This is used primarily as an optional final parameter for service functions. + + Attributes: + - timeout_in_seconds: int. The number of seconds to await an API call before timing out. + + - max_retries: int. The max number of retries to attempt if the API call fails. + + - additional_headers: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's header dict + + - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict + + - additional_body_parameters: typing.Dict[str, typing.Any]. 
A dictionary containing additional parameters to spread into the request's body parameters dict + + - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads. + """ + + timeout_in_seconds: NotRequired[int] + max_retries: NotRequired[int] + additional_headers: NotRequired[typing.Dict[str, typing.Any]] + additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]] + additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]] + chunk_size: NotRequired[int] diff --git a/src/humanloop/core/serialization.py b/src/humanloop/core/serialization.py new file mode 100644 index 00000000..c36e865c --- /dev/null +++ b/src/humanloop/core/serialization.py @@ -0,0 +1,276 @@ +# This file was auto-generated by Fern from our API Definition. + +import collections +import inspect +import typing + +import pydantic +import typing_extensions + + +class FieldMetadata: + """ + Metadata class used to annotate fields to provide additional information. + + Example: + class MyDict(TypedDict): + field: typing.Annotated[str, FieldMetadata(alias="field_name")] + + Will serialize: `{"field": "value"}` + To: `{"field_name": "value"}` + """ + + alias: str + + def __init__(self, *, alias: str) -> None: + self.alias = alias + + +def convert_and_respect_annotation_metadata( + *, + object_: typing.Any, + annotation: typing.Any, + inner_type: typing.Optional[typing.Any] = None, + direction: typing.Literal["read", "write"], +) -> typing.Any: + """ + Respect the metadata annotations on a field, such as aliasing. This function effectively + manipulates the dict-form of an object to respect the metadata annotations. This is primarily used for + TypedDicts, which cannot support aliasing out of the box, and can be extended for additional + utilities, such as defaults. + + Parameters + ---------- + object_ : typing.Any + + annotation : type + The type we're looking to apply typing annotations from + + inner_type : typing.Optional[type] + + Returns + ------- + typing.Any + """ + + if object_ is None: + return None + if inner_type is None: + inner_type = annotation + + clean_type = _remove_annotations(inner_type) + # Pydantic models + if ( + inspect.isclass(clean_type) + and issubclass(clean_type, pydantic.BaseModel) + and isinstance(object_, typing.Mapping) + ): + return _convert_mapping(object_, clean_type, direction) + # TypedDicts + if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping): + return _convert_mapping(object_, clean_type, direction) + + if ( + typing_extensions.get_origin(clean_type) == typing.Dict + or typing_extensions.get_origin(clean_type) == dict + or clean_type == typing.Dict + ) and isinstance(object_, typing.Dict): + key_type = typing_extensions.get_args(clean_type)[0] + value_type = typing_extensions.get_args(clean_type)[1] + + return { + key: convert_and_respect_annotation_metadata( + object_=value, + annotation=annotation, + inner_type=value_type, + direction=direction, + ) + for key, value in object_.items() + } + + # If you're iterating on a string, do not bother to coerce it to a sequence. 
+ if not isinstance(object_, str): + if ( + typing_extensions.get_origin(clean_type) == typing.Set + or typing_extensions.get_origin(clean_type) == set + or clean_type == typing.Set + ) and isinstance(object_, typing.Set): + inner_type = typing_extensions.get_args(clean_type)[0] + return { + convert_and_respect_annotation_metadata( + object_=item, + annotation=annotation, + inner_type=inner_type, + direction=direction, + ) + for item in object_ + } + elif ( + ( + typing_extensions.get_origin(clean_type) == typing.List + or typing_extensions.get_origin(clean_type) == list + or clean_type == typing.List + ) + and isinstance(object_, typing.List) + ) or ( + ( + typing_extensions.get_origin(clean_type) == typing.Sequence + or typing_extensions.get_origin(clean_type) == collections.abc.Sequence + or clean_type == typing.Sequence + ) + and isinstance(object_, typing.Sequence) + ): + inner_type = typing_extensions.get_args(clean_type)[0] + return [ + convert_and_respect_annotation_metadata( + object_=item, + annotation=annotation, + inner_type=inner_type, + direction=direction, + ) + for item in object_ + ] + + if typing_extensions.get_origin(clean_type) == typing.Union: + # We should be able to ~relatively~ safely try to convert keys against all + # member types in the union, the edge case here is if one member aliases a field + # of the same name to a different name from another member + # Or if another member aliases a field of the same name that another member does not. + for member in typing_extensions.get_args(clean_type): + object_ = convert_and_respect_annotation_metadata( + object_=object_, + annotation=annotation, + inner_type=member, + direction=direction, + ) + return object_ + + annotated_type = _get_annotation(annotation) + if annotated_type is None: + return object_ + + # If the object is not a TypedDict, a Union, or other container (list, set, sequence, etc.) + # Then we can safely call it on the recursive conversion. + return object_ + + +def _convert_mapping( + object_: typing.Mapping[str, object], + expected_type: typing.Any, + direction: typing.Literal["read", "write"], +) -> typing.Mapping[str, object]: + converted_object: typing.Dict[str, object] = {} + try: + annotations = typing_extensions.get_type_hints(expected_type, include_extras=True) + except NameError: + # The TypedDict contains a circular reference, so + # we use the __annotations__ attribute directly. 
+ annotations = getattr(expected_type, "__annotations__", {}) + aliases_to_field_names = _get_alias_to_field_name(annotations) + for key, value in object_.items(): + if direction == "read" and key in aliases_to_field_names: + dealiased_key = aliases_to_field_names.get(key) + if dealiased_key is not None: + type_ = annotations.get(dealiased_key) + else: + type_ = annotations.get(key) + # Note you can't get the annotation by the field name if you're in read mode, so you must check the aliases map + # + # So this is effectively saying if we're in write mode, and we don't have a type, or if we're in read mode and we don't have an alias + # then we can just pass the value through as is + if type_ is None: + converted_object[key] = value + elif direction == "read" and key not in aliases_to_field_names: + converted_object[key] = convert_and_respect_annotation_metadata( + object_=value, annotation=type_, direction=direction + ) + else: + converted_object[_alias_key(key, type_, direction, aliases_to_field_names)] = ( + convert_and_respect_annotation_metadata(object_=value, annotation=type_, direction=direction) + ) + return converted_object + + +def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]: + maybe_annotated_type = typing_extensions.get_origin(type_) + if maybe_annotated_type is None: + return None + + if maybe_annotated_type == typing_extensions.NotRequired: + type_ = typing_extensions.get_args(type_)[0] + maybe_annotated_type = typing_extensions.get_origin(type_) + + if maybe_annotated_type == typing_extensions.Annotated: + return type_ + + return None + + +def _remove_annotations(type_: typing.Any) -> typing.Any: + maybe_annotated_type = typing_extensions.get_origin(type_) + if maybe_annotated_type is None: + return type_ + + if maybe_annotated_type == typing_extensions.NotRequired: + return _remove_annotations(typing_extensions.get_args(type_)[0]) + + if maybe_annotated_type == typing_extensions.Annotated: + return _remove_annotations(typing_extensions.get_args(type_)[0]) + + return type_ + + +def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]: + annotations = typing_extensions.get_type_hints(type_, include_extras=True) + return _get_alias_to_field_name(annotations) + + +def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]: + annotations = typing_extensions.get_type_hints(type_, include_extras=True) + return _get_field_to_alias_name(annotations) + + +def _get_alias_to_field_name( + field_to_hint: typing.Dict[str, typing.Any], +) -> typing.Dict[str, str]: + aliases = {} + for field, hint in field_to_hint.items(): + maybe_alias = _get_alias_from_type(hint) + if maybe_alias is not None: + aliases[maybe_alias] = field + return aliases + + +def _get_field_to_alias_name( + field_to_hint: typing.Dict[str, typing.Any], +) -> typing.Dict[str, str]: + aliases = {} + for field, hint in field_to_hint.items(): + maybe_alias = _get_alias_from_type(hint) + if maybe_alias is not None: + aliases[field] = maybe_alias + return aliases + + +def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]: + maybe_annotated_type = _get_annotation(type_) + + if maybe_annotated_type is not None: + # The actual annotations are 1 onward, the first is the annotated type + annotations = typing_extensions.get_args(maybe_annotated_type)[1:] + + for annotation in annotations: + if isinstance(annotation, FieldMetadata) and annotation.alias is not None: + return annotation.alias + return None + + +def _alias_key( + key: str, + type_: typing.Any, + 
direction: typing.Literal["read", "write"], + aliases_to_field_names: typing.Dict[str, str], +) -> str: + if direction == "read": + return aliases_to_field_names.get(key, key) + return _get_alias_from_type(type_=type_) or key diff --git a/src/humanloop/core/unchecked_base_model.py b/src/humanloop/core/unchecked_base_model.py new file mode 100644 index 00000000..2c2d92a7 --- /dev/null +++ b/src/humanloop/core/unchecked_base_model.py @@ -0,0 +1,303 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import inspect +import typing +import uuid + +import pydantic +import typing_extensions +from .pydantic_utilities import ( + IS_PYDANTIC_V2, + ModelField, + UniversalBaseModel, + get_args, + get_origin, + is_literal_type, + is_union, + parse_date, + parse_datetime, + parse_obj_as, +) +from .serialization import get_field_to_alias_mapping +from pydantic_core import PydanticUndefined + + +class UnionMetadata: + discriminant: str + + def __init__(self, *, discriminant: str) -> None: + self.discriminant = discriminant + + +Model = typing.TypeVar("Model", bound=pydantic.BaseModel) + + +class UncheckedBaseModel(UniversalBaseModel): + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow") # type: ignore # Pydantic v2 + else: + + class Config: + extra = pydantic.Extra.allow + + @classmethod + def model_construct( + cls: typing.Type["Model"], + _fields_set: typing.Optional[typing.Set[str]] = None, + **values: typing.Any, + ) -> "Model": + # Fallback construct function to the specified override below. + return cls.construct(_fields_set=_fields_set, **values) + + # Allow construct to not validate model + # Implementation taken from: https://github.com/pydantic/pydantic/issues/1168#issuecomment-817742836 + @classmethod + def construct( + cls: typing.Type["Model"], + _fields_set: typing.Optional[typing.Set[str]] = None, + **values: typing.Any, + ) -> "Model": + m = cls.__new__(cls) + fields_values = {} + + if _fields_set is None: + _fields_set = set(values.keys()) + + fields = _get_model_fields(cls) + populate_by_name = _get_is_populate_by_name(cls) + field_aliases = get_field_to_alias_mapping(cls) + + for name, field in fields.items(): + # Key here is only used to pull data from the values dict + # you should always use the NAME of the field to for field_values, etc. + # because that's how the object is constructed from a pydantic perspective + key = field.alias + if (key is None or field.alias == name) and name in field_aliases: + key = field_aliases[name] + + if key is None or (key not in values and populate_by_name): # Added this to allow population by field name + key = name + + if key in values: + if IS_PYDANTIC_V2: + type_ = field.annotation # type: ignore # Pydantic v2 + else: + type_ = typing.cast(typing.Type, field.outer_type_) # type: ignore # Pydantic < v1.10.15 + + fields_values[name] = ( + construct_type(object_=values[key], type_=type_) if type_ is not None else values[key] + ) + _fields_set.add(name) + else: + default = _get_field_default(field) + fields_values[name] = default + + # If the default values are non-null act like they've been set + # This effectively allows exclude_unset to work like exclude_none where + # the latter passes through intentionally set none values. 
+                if default is not None and default is not PydanticUndefined:
+                    _fields_set.add(name)
+
+        # Add extras back in
+        extras = {}
+        pydantic_alias_fields = [field.alias for field in fields.values()]
+        internal_alias_fields = list(field_aliases.values())
+        for key, value in values.items():
+            # If the key is not a field by name, nor an alias to a field, then it's extra
+            if (key not in pydantic_alias_fields and key not in internal_alias_fields) and key not in fields:
+                if IS_PYDANTIC_V2:
+                    extras[key] = value
+                else:
+                    _fields_set.add(key)
+                    fields_values[key] = value
+
+        object.__setattr__(m, "__dict__", fields_values)
+
+        if IS_PYDANTIC_V2:
+            object.__setattr__(m, "__pydantic_private__", None)
+            object.__setattr__(m, "__pydantic_extra__", extras)
+            object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
+        else:
+            object.__setattr__(m, "__fields_set__", _fields_set)
+            m._init_private_attributes()  # type: ignore # Pydantic v1
+        return m
+
+
+def _convert_undiscriminated_union_type(union_type: typing.Type[typing.Any], object_: typing.Any) -> typing.Any:
+    inner_types = get_args(union_type)
+    if typing.Any in inner_types:
+        return object_
+
+    for inner_type in inner_types:
+        try:
+            if inspect.isclass(inner_type) and issubclass(inner_type, pydantic.BaseModel):
+                # Attempt a validated parse until one works
+                return parse_obj_as(inner_type, object_)
+        except Exception:
+            continue
+
+    # If none of the types work, just return the first successful cast
+    for inner_type in inner_types:
+        try:
+            return construct_type(object_=object_, type_=inner_type)
+        except Exception:
+            continue
+
+
+def _convert_union_type(type_: typing.Type[typing.Any], object_: typing.Any) -> typing.Any:
+    base_type = get_origin(type_) or type_
+    union_type = type_
+    if base_type == typing_extensions.Annotated:
+        union_type = get_args(type_)[0]
+        annotated_metadata = get_args(type_)[1:]
+        for metadata in annotated_metadata:
+            if isinstance(metadata, UnionMetadata):
+                try:
+                    # Cast to the correct type, based on the discriminant
+                    for inner_type in get_args(union_type):
+                        try:
+                            objects_discriminant = getattr(object_, metadata.discriminant)
+                        except Exception:
+                            objects_discriminant = object_[metadata.discriminant]
+                        if inner_type.__fields__[metadata.discriminant].default == objects_discriminant:
+                            return construct_type(object_=object_, type_=inner_type)
+                except Exception:
+                    # Allow to fall through to our regular union handling
+                    pass
+    return _convert_undiscriminated_union_type(union_type, object_)
+
+
+def construct_type(*, type_: typing.Type[typing.Any], object_: typing.Any) -> typing.Any:
+    """
+    Here we are essentially creating the same `construct` method in spirit as the above, but for all types, not just
+    Pydantic models.
+    The idea is to essentially attempt to coerce object_ to type_ (recursively)
+    """
+    # Short circuit when dealing with optionals, don't try to coerce None to a type
+    if object_ is None:
+        return None
+
+    base_type = get_origin(type_) or type_
+    is_annotated = base_type == typing_extensions.Annotated
+    maybe_annotation_members = get_args(type_)
+    is_annotated_union = is_annotated and is_union(get_origin(maybe_annotation_members[0]))
+
+    if base_type == typing.Any:
+        return object_
+
+    if base_type == dict:
+        if not isinstance(object_, typing.Mapping):
+            return object_
+
+        key_type, items_type = get_args(type_)
+        d = {
+            construct_type(object_=key, type_=key_type): construct_type(object_=item, type_=items_type)
+            for key, item in object_.items()
+        }
+        return d
+
+    if base_type == list:
+        if not isinstance(object_, list):
+            return object_
+
+        inner_type = get_args(type_)[0]
+        return [construct_type(object_=entry, type_=inner_type) for entry in object_]
+
+    if base_type == set:
+        if not isinstance(object_, set) and not isinstance(object_, list):
+            return object_
+
+        inner_type = get_args(type_)[0]
+        return {construct_type(object_=entry, type_=inner_type) for entry in object_}
+
+    if is_union(base_type) or is_annotated_union:
+        return _convert_union_type(type_, object_)
+
+    # Cannot do an `issubclass` with a literal type, let's also just confirm we have a class before this call
+    if (
+        object_ is not None
+        and not is_literal_type(type_)
+        and (
+            (inspect.isclass(base_type) and issubclass(base_type, pydantic.BaseModel))
+            or (
+                is_annotated
+                and inspect.isclass(maybe_annotation_members[0])
+                and issubclass(maybe_annotation_members[0], pydantic.BaseModel)
+            )
+        )
+    ):
+        if IS_PYDANTIC_V2:
+            return type_.model_construct(**object_)
+        else:
+            return type_.construct(**object_)
+
+    if base_type == dt.datetime:
+        try:
+            return parse_datetime(object_)
+        except Exception:
+            return object_
+
+    if base_type == dt.date:
+        try:
+            return parse_date(object_)
+        except Exception:
+            return object_
+
+    if base_type == uuid.UUID:
+        try:
+            return uuid.UUID(object_)
+        except Exception:
+            return object_
+
+    if base_type == int:
+        try:
+            return int(object_)
+        except Exception:
+            return object_
+
+    if base_type == bool:
+        try:
+            if isinstance(object_, str):
+                stringified_object = object_.lower()
+                return stringified_object == "true" or stringified_object == "1"
+
+            return bool(object_)
+        except Exception:
+            return object_
+
+    return object_
+
+
+def _get_is_populate_by_name(model: typing.Type["Model"]) -> bool:
+    if IS_PYDANTIC_V2:
+        return model.model_config.get("populate_by_name", False)  # type: ignore # Pydantic v2
+    return model.__config__.allow_population_by_field_name  # type: ignore # Pydantic v1
+
+
+PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo]
+
+
+# Pydantic V1 swapped the typing of __fields__'s values from ModelField to FieldInfo
+# And so we try to handle both V1 cases, as well as V2 (FieldInfo from model.model_fields)
+def _get_model_fields(
+    model: typing.Type["Model"],
+) -> typing.Mapping[str, PydanticField]:
+    if IS_PYDANTIC_V2:
+        return model.model_fields  # type: ignore # Pydantic v2
+    else:
+        return model.__fields__  # type: ignore # Pydantic v1
+
+
+def _get_field_default(field: PydanticField) -> typing.Any:
+    try:
+        value = field.get_default()  # type: ignore # Pydantic < v1.10.15
+    except Exception:
+        value = field.default
+    if IS_PYDANTIC_V2:
+        from pydantic_core import PydanticUndefined
+
+        if value is PydanticUndefined:
+            return None
+        return value
+    return value
diff --git 
a/src/humanloop/datasets/__init__.py b/src/humanloop/datasets/__init__.py new file mode 100644 index 00000000..ff5c1227 --- /dev/null +++ b/src/humanloop/datasets/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .types import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints + +__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"] diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py new file mode 100644 index 00000000..30c7d310 --- /dev/null +++ b/src/humanloop/datasets/client.py @@ -0,0 +1,1330 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .. import core +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..requests.create_datapoint_request import CreateDatapointRequestParams +from ..types.datapoint_response import DatapointResponse +from ..types.dataset_response import DatasetResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.list_datasets import ListDatasets +from ..types.sort_order import SortOrder +from ..types.update_dateset_action import UpdateDatesetAction +from .raw_client import AsyncRawDatasetsClient, RawDatasetsClient +from .types.list_versions_datasets_id_versions_get_request_include_datapoints import ( + ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, +) + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class DatasetsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawDatasetsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawDatasetsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawDatasetsClient + """ + return self._raw_client + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[DatasetResponse]: + """ + List all Datasets. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datasets to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Dataset name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Datasets by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        SyncPager[DatasetResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.datasets.list(size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page.items)
+        """
+        return self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    def upsert(
+        self,
+        *,
+        datapoints: typing.Sequence[CreateDatapointRequestParams],
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        include_datapoints: typing.Optional[bool] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        action: typing.Optional[UpdateDatesetAction] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> DatasetResponse:
+        """
+        Create a Dataset or update it with a new version if it already exists.
+
+        Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
+
+        By default, the new Dataset version will be set to the list of Datapoints provided in
+        the request. You can also create a new version by adding or removing Datapoints from an existing version
+        by specifying `action` as `add` or `remove` respectively. In this case, you may specify
+        the `version_id` or `environment` query parameters to identify the existing version to base
+        the new version on. If neither is provided, the latest created version will be used.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Dataset - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
+        exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
+        you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: <unique id>}`.
+
+        Parameters
+        ----------
+        datapoints : typing.Sequence[CreateDatapointRequestParams]
+            The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        include_datapoints : typing.Optional[bool]
+            If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+        path : typing.Optional[str]
+            Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Dataset.
+
+        action : typing.Optional[UpdateDatesetAction]
+            The action to take with the provided Datapoints.
+
+            - If `"set"`, the created version will only contain the Datapoints provided in this request.
+ - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. + - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. + + If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Dataset version. Version names must be unique for a given Dataset. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.upsert(path='test-questions', datapoints=[{'inputs': {'question': 'What is the capital of France?'}, 'target': {'answer': 'Paris'}}, {'inputs': {'question': 'Who wrote Hamlet?'}, 'target': {'answer': 'William Shakespeare'}}], action="set", version_name='test-questions-v1', version_description='Add two new questions and answers', ) + """ + _response = self._raw_client.upsert( + datapoints=datapoints, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, + path=path, + id=id, + action=action, + attributes=attributes, + version_name=version_name, + version_description=version_description, + request_options=request_options, + ) + return _response.data + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DatasetResponse: + """ + Retrieve the Dataset with the given ID. + + Unless `include_datapoints` is set to `true`, the response will not include + the Datapoints. + Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently + retrieve Datapoints for a large Dataset. + + By default, the deployed version of the Dataset is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, ) + """ + _response = self._raw_client.get( + id, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, + request_options=request_options, + ) + return _response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.delete(id='id', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DatasetResponse: + """ + Move the Dataset to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + path : typing.Optional[str] + Path of the Dataset including the Dataset name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Dataset, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.move(id='id', ) + """ + _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + def list_datapoints( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[DatapointResponse]: + """ + List all Datapoints for the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datapoints to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        SyncPager[DatapointResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page.items)
+        """
+        return self._raw_client.list_datapoints(
+            id, version_id=version_id, environment=environment, page=page, size=size, request_options=request_options
+        )
+
+    def list_versions(
+        self,
+        id: str,
+        *,
+        include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ListDatasets:
+        """
+        Get a list of the versions for a Dataset.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]
+            If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ListDatasets
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.datasets.list_versions(id='ds_b0baF1ca7652', )
+        """
+        _response = self._raw_client.list_versions(
+            id, include_datapoints=include_datapoints, request_options=request_options
+        )
+        return _response.data
+
+    def delete_dataset_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Delete a version of the Dataset.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.datasets.delete_dataset_version(id='id', version_id='version_id', )
+        """
+        _response = self._raw_client.delete_dataset_version(id, version_id, request_options=request_options)
+        return _response.data
+
+    def update_dataset_version(
+        self,
+        id: str,
+        version_id: str,
+        *,
+        name: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> DatasetResponse:
+        """
+        Update the name or description of the Dataset version.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        name : typing.Optional[str]
+            Name of the version.
+
+        description : typing.Optional[str]
+            Description of the version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DatasetResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.datasets.update_dataset_version(id='id', version_id='version_id', )
+        """
+        _response = self._raw_client.update_dataset_version(
+            id, version_id, name=name, description=description, request_options=request_options
+        )
+        return _response.data
+
+    def upload_csv(
+        self,
+        id: str,
+        *,
+        file: core.File,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> DatasetResponse:
+        """
+        Add Datapoints from a CSV file to a Dataset.
+
+        This will create a new version of the Dataset with the Datapoints from the CSV file.
+
+        If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+        with the Datapoints from the CSV file added to the existing Datapoints in the version.
+        If neither `version_id` nor `environment` is provided, the new version will be based on the version
+        of the Dataset that is deployed to the default Environment.
+
+        You can optionally provide a name and description for the new version using `version_name`
+        and `version_description` parameters.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for the Dataset
+
+        file : core.File
+            See core.File for more documentation
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on.
+
+        version_name : typing.Optional[str]
+            Name for the new Dataset version.
+
+        version_description : typing.Optional[str]
+            Description for the new Dataset version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DatasetResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.datasets.upload_csv(id='id', file=open('datapoints.csv', 'rb'), )
+        """
+        _response = self._raw_client.upload_csv(
+            id,
+            file=file,
+            version_id=version_id,
+            environment=environment,
+            version_name=version_name,
+            version_description=version_description,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DatasetResponse:
+        """
+        Deploy Dataset to Environment.
+
+        Set the deployed version for the specified Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', ) + """ + _response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Dataset from Environment. + + Remove the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', ) + """ + _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.datasets.list_environments(id='id', ) + """ + _response = self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + +class AsyncDatasetsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawDatasetsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawDatasetsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawDatasetsClient + """ + return self._raw_client + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[DatasetResponse]: + """ + List all Datasets. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datasets to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Dataset name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Datasets by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        AsyncPager[DatasetResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.datasets.list(size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page.items)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    async def upsert(
+        self,
+        *,
+        datapoints: typing.Sequence[CreateDatapointRequestParams],
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        include_datapoints: typing.Optional[bool] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        action: typing.Optional[UpdateDatesetAction] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> DatasetResponse:
+        """
+        Create a Dataset or update it with a new version if it already exists.
+
+        Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
+
+        By default, the new Dataset version will be set to the list of Datapoints provided in
+        the request. You can also create a new version by adding or removing Datapoints from an existing version
+        by specifying `action` as `add` or `remove` respectively. In this case, you may specify
+        the `version_id` or `environment` query parameters to identify the existing version to base
+        the new version on. If neither is provided, the latest created version will be used.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Dataset - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
+        exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
+        you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: <unique id>}`.
+
+        Parameters
+        ----------
+        datapoints : typing.Sequence[CreateDatapointRequestParams]
+            The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        include_datapoints : typing.Optional[bool]
+            If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+        path : typing.Optional[str]
+            Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Dataset.
+
+        action : typing.Optional[UpdateDatesetAction]
+            The action to take with the provided Datapoints.
+ + - If `"set"`, the created version will only contain the Datapoints provided in this request. + - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. + - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. + + If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Dataset version. Version names must be unique for a given Dataset. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.upsert(path='test-questions', datapoints=[{'inputs': {'question': 'What is the capital of France?'}, 'target': {'answer': 'Paris'}}, {'inputs': {'question': 'Who wrote Hamlet?'}, 'target': {'answer': 'William Shakespeare'}}], action="set", version_name='test-questions-v1', version_description='Add two new questions and answers', ) + asyncio.run(main()) + """ + _response = await self._raw_client.upsert( + datapoints=datapoints, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, + path=path, + id=id, + action=action, + attributes=attributes, + version_name=version_name, + version_description=version_description, + request_options=request_options, + ) + return _response.data + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> DatasetResponse: + """ + Retrieve the Dataset with the given ID. + + Unless `include_datapoints` is set to `true`, the response will not include + the Datapoints. + Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently + retrieve Datapoints for a large Dataset. + + By default, the deployed version of the Dataset is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, ) + asyncio.run(main()) + """ + _response = await self._raw_client.get( + id, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, + request_options=request_options, + ) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.delete(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DatasetResponse: + """ + Move the Dataset to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + path : typing.Optional[str] + Path of the Dataset including the Dataset name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Dataset, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.move(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + async def list_datapoints( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[DatapointResponse]: + """ + List all Datapoints for the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datapoints to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        AsyncPager[DatapointResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list_datapoints(
+            id, version_id=version_id, environment=environment, page=page, size=size, request_options=request_options
+        )
+
+    async def list_versions(
+        self,
+        id: str,
+        *,
+        include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ListDatasets:
+        """
+        Get a list of the versions for a Dataset.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]
+            If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ListDatasets
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.datasets.list_versions(id='ds_b0baF1ca7652', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.list_versions(
+            id, include_datapoints=include_datapoints, request_options=request_options
+        )
+        return _response.data
+
+    async def delete_dataset_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Delete a version of the Dataset.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.datasets.delete_dataset_version(id='id', version_id='version_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.delete_dataset_version(id, version_id, request_options=request_options)
+        return _response.data
+
+    async def update_dataset_version(
+        self,
+        id: str,
+        version_id: str,
+        *,
+        name: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> DatasetResponse:
+        """
+        Update the name or description of the Dataset version.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        name : typing.Optional[str]
+            Name of the version.
+
+        description : typing.Optional[str]
+            Description of the version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DatasetResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.datasets.update_dataset_version(id='id', version_id='version_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.update_dataset_version(
+            id, version_id, name=name, description=description, request_options=request_options
+        )
+        return _response.data
+
+    async def upload_csv(
+        self,
+        id: str,
+        *,
+        file: core.File,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> DatasetResponse:
+        """
+        Add Datapoints from a CSV file to a Dataset.
+
+        This will create a new version of the Dataset with the Datapoints from the CSV file.
+
+        If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+        with the Datapoints from the CSV file added to the existing Datapoints in the version.
+        If neither `version_id` nor `environment` is provided, the new version will be based on the version
+        of the Dataset that is deployed to the default Environment.
+
+        You can optionally provide a name and description for the new version using the `version_name`
+        and `version_description` parameters.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for the Dataset.
+
+        file : core.File
+            See core.File for more documentation
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on.
+
+        version_name : typing.Optional[str]
+            Name for the new Dataset version.
+
+        version_description : typing.Optional[str]
+            Description for the new Dataset version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        DatasetResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.datasets.upload_csv(id='id', file=open('datapoints.csv', 'rb'), )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.upload_csv(
+            id,
+            file=file,
+            version_id=version_id,
+            environment=environment,
+            version_name=version_name,
+            version_description=version_description,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> DatasetResponse:
+        """
+        Deploy Dataset to Environment.
+
+        Set the deployed version for the specified Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + DatasetResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', ) + asyncio.run(main()) + """ + _response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Dataset from Environment. + + Remove the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.datasets.list_environments(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_environments(id, request_options=request_options) + return _response.data diff --git a/src/humanloop/datasets/raw_client.py b/src/humanloop/datasets/raw_client.py new file mode 100644 index 00000000..774f04fc --- /dev/null +++ b/src/humanloop/datasets/raw_client.py @@ -0,0 +1,1924 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from .. 
import core +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager +from ..core.request_options import RequestOptions +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..requests.create_datapoint_request import CreateDatapointRequestParams +from ..types.datapoint_response import DatapointResponse +from ..types.dataset_response import DatasetResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.http_validation_error import HttpValidationError +from ..types.list_datasets import ListDatasets +from ..types.paginated_datapoint_response import PaginatedDatapointResponse +from ..types.paginated_dataset_response import PaginatedDatasetResponse +from ..types.sort_order import SortOrder +from ..types.update_dateset_action import UpdateDatesetAction +from .types.list_versions_datasets_id_versions_get_request_include_datapoints import ( + ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, +) + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawDatasetsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[DatasetResponse]: + """ + List all Datasets. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datasets to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Dataset name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Datasets by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
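+
+        A minimal sketch of calling this method (the raw client is reached via
+        `client.datasets.with_raw_response`; the `id` field on `DatasetResponse`
+        is assumed here for illustration):
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY")
+        pager = client.datasets.with_raw_response.list(size=10)
+        for dataset in pager:  # the pager lazily follows `get_next` across pages
+            print(dataset.id)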
+ + Returns + ------- + SyncPager[DatasetResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + "datasets", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDatasetResponse, + construct_type( + type_=PaginatedDatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def upsert( + self, + *, + datapoints: typing.Sequence[CreateDatapointRequestParams], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + action: typing.Optional[UpdateDatesetAction] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Create a Dataset or update it with a new version if it already exists. + + Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. + + By default, the new Dataset version will be set to the list of Datapoints provided in + the request. You can also create a new version by adding or removing Datapoints from an existing version + by specifying `action` as `add` or `remove` respectively. In this case, you may specify + the `version_id` or `environment` query parameters to identify the existing version to base + the new version on. If neither is provided, the latest created version will be used. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Dataset - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already + exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, + you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. + + Parameters + ---------- + datapoints : typing.Sequence[CreateDatapointRequestParams] + The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. 
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        include_datapoints : typing.Optional[bool]
+            If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+        path : typing.Optional[str]
+            Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Dataset.
+
+        action : typing.Optional[UpdateDatesetAction]
+            The action to take with the provided Datapoints.
+
+            - If `"set"`, the created version will only contain the Datapoints provided in this request.
+            - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+            - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+
+            If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
+
+        version_name : typing.Optional[str]
+            Unique name for the Dataset version. Version names must be unique for a given Dataset.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
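+
+        A minimal sketch of the `"add"` flow described above (the IDs and path are
+        illustrative, and the `version_id` field on `DatasetResponse` is assumed):
+
+        response = client.datasets.with_raw_response.upsert(
+            path="test-questions",
+            datapoints=[{"inputs": {"question": "What is 2 + 2?"}, "target": {"answer": "4"}}],
+            action="add",  # keep the target version's Datapoints and append these
+            environment="staging",  # base the new version on the one deployed to staging
+        )
+        print(response.data.version_id)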
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "datasets", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + json={ + "path": path, + "id": id, + "datapoints": convert_and_respect_annotation_metadata( + object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" + ), + "action": action, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Retrieve the Dataset with the given ID. + + Unless `include_datapoints` is set to `true`, the response will not include + the Datapoints. + Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently + retrieve Datapoints for a large Dataset. + + By default, the deployed version of the Dataset is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
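+
+        A minimal sketch (hypothetical IDs): fetch the version deployed to an
+        Environment without pulling every Datapoint into the response:
+
+        response = client.datasets.with_raw_response.get(
+            id="ds_b0baF1ca7652",
+            environment="staging",  # or pin an exact version with version_id="dsv_..."
+            include_datapoints=False,  # page through Datapoints separately instead
+        )
+        dataset = response.data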
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Move the Dataset to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + path : typing.Optional[str] + Path of the Dataset including the Dataset name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Dataset, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_datapoints( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[DatapointResponse]: + """ + List all Datapoints for the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datapoints to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
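+
+        A minimal sketch of page-by-page iteration (hypothetical ID; `iter_pages`
+        mirroring the item-level iterator, and the `items` attribute on each page,
+        are assumed from the pager interface):
+
+        pager = client.datasets.with_raw_response.list_datapoints(id="ds_b0baF1ca7652", size=100)
+        for page in pager.iter_pages():
+            print(len(page.items))  # process each page of Datapoints as a batch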
+ + Returns + ------- + SyncPager[DatapointResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/datapoints", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + "page": page, + "size": size, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDatapointResponse, + construct_type( + type_=PaginatedDatapointResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list_datapoints( + id, + version_id=version_id, + environment=environment, + page=page + 1, + size=size, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_versions( + self, + id: str, + *, + include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListDatasets]: + """ + Get a list of the versions for a Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] + If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListDatasets] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListDatasets, + construct_type( + type_=ListDatasets, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_dataset_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. 
+ + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_dataset_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Update the name or description of the Dataset version. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def upload_csv( + self, + id: str, + *, + file: core.File, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Add Datapoints from a CSV file to a Dataset. + + This will create a new version of the Dataset with the Datapoints from the CSV file. 
+ + If either `version_id` or `environment` is provided, the new version will be based on the specified version, + with the Datapoints from the CSV file added to the existing Datapoints in the version. + If neither `version_id` nor `environment` is provided, the new version will be based on the version + of the Dataset that is deployed to the default Environment. + + You can optionally provide a name and description for the new version using `version_name` + and `version_description` parameters. + + Parameters + ---------- + id : str + Unique identifier for the Dataset + + file : core.File + See core.File for more documentation + + version_id : typing.Optional[str] + ID of the specific Dataset version to base the created Version on. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed Version to base the created Version on. + + version_name : typing.Optional[str] + Name for the new Dataset version. + + version_description : typing.Optional[str] + Description for the new Dataset version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/datapoints/csv", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + data={ + "version_name": version_name, + "version_description": version_description, + }, + files={ + "file": file, + }, + headers={ + "content-type": "multipart/form-data", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[DatasetResponse]: + """ + Deploy Dataset to Environment. + + Set the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Dataset from Environment. + + Remove the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
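+
+        A minimal sketch (hypothetical ID; the `name` field on
+        `FileEnvironmentResponse` is assumed) for auditing where versions are live:
+
+        response = client.datasets.with_raw_response.list_environments(id="ds_b0baF1ca7652")
+        for environment in response.data:
+            print(environment.name)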
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawDatasetsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[DatasetResponse]: + """ + List all Datasets. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datasets to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Dataset name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Datasets by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
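+
+        A minimal sketch of the async variant: the AsyncPager is consumed with
+        `async for` inside a coroutine driven by `asyncio.run` (the `id` field on
+        `DatasetResponse` is assumed for illustration):
+
+        import asyncio
+        from humanloop import AsyncHumanloop
+
+        async def main() -> None:
+            client = AsyncHumanloop(api_key="YOUR_API_KEY")
+            pager = await client.datasets.with_raw_response.list(size=10)
+            async for dataset in pager:
+                print(dataset.id)
+
+        asyncio.run(main())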
+ + Returns + ------- + AsyncPager[DatasetResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + "datasets", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDatasetResponse, + construct_type( + type_=PaginatedDatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def upsert( + self, + *, + datapoints: typing.Sequence[CreateDatapointRequestParams], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + action: typing.Optional[UpdateDatesetAction] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Create a Dataset or update it with a new version if it already exists. + + Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. + + By default, the new Dataset version will be set to the list of Datapoints provided in + the request. You can also create a new version by adding or removing Datapoints from an existing version + by specifying `action` as `add` or `remove` respectively. In this case, you may specify + the `version_id` or `environment` query parameters to identify the existing version to base + the new version on. If neither is provided, the latest created version will be used. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Dataset - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already + exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, + you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. + + Parameters + ---------- + datapoints : typing.Sequence[CreateDatapointRequestParams] + The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. 
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+
+        include_datapoints : typing.Optional[bool]
+            If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+        path : typing.Optional[str]
+            Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Dataset.
+
+        action : typing.Optional[UpdateDatesetAction]
+            The action to take with the provided Datapoints.
+
+            - If `"set"`, the created version will only contain the Datapoints provided in this request.
+            - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+            - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+
+            If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
+
+        version_name : typing.Optional[str]
+            Unique name for the Dataset version. Version names must be unique for a given Dataset.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
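+
+        A minimal sketch (inside a coroutine) of the deduplication escape hatch
+        described above: salt `inputs` with a unique key so otherwise identical
+        Datapoints are kept (the `_dedupe_id` values here are arbitrary):
+
+        await client.datasets.with_raw_response.upsert(
+            path="test-questions",
+            datapoints=[
+                {"inputs": {"question": "Ping?", "_dedupe_id": "a"}, "target": {"answer": "Pong"}},
+                {"inputs": {"question": "Ping?", "_dedupe_id": "b"}, "target": {"answer": "Pong"}},
+            ],
+            action="add",
+        )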
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "datasets", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + json={ + "path": path, + "id": id, + "datapoints": convert_and_respect_annotation_metadata( + object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" + ), + "action": action, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Retrieve the Dataset with the given ID. + + Unless `include_datapoints` is set to `true`, the response will not include + the Datapoints. + Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently + retrieve Datapoints for a large Dataset. + + By default, the deployed version of the Dataset is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Move the Dataset to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + path : typing.Optional[str] + Path of the Dataset including the Dataset name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Dataset, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_datapoints( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[DatapointResponse]: + """ + List all Datapoints for the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Datapoints to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncPager[DatapointResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/datapoints", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + "page": page, + "size": size, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDatapointResponse, + construct_type( + type_=PaginatedDatapointResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list_datapoints( + id, + version_id=version_id, + environment=environment, + page=page + 1, + size=size, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_versions( + self, + id: str, + *, + include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListDatasets]: + """ + Get a list of the versions for a Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] + If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListDatasets] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListDatasets, + construct_type( + type_=ListDatasets, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete_dataset_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Dataset. 
+ + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_dataset_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Update the name or description of the Dataset version. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def upload_csv( + self, + id: str, + *, + file: core.File, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Add Datapoints from a CSV file to a Dataset. + + This will create a new version of the Dataset with the Datapoints from the CSV file. 
+
+        If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+        with the Datapoints from the CSV file added to the existing Datapoints in the version.
+        If neither `version_id` nor `environment` is provided, the new version will be based on the version
+        of the Dataset that is deployed to the default Environment.
+
+        You can optionally provide a name and description for the new version using the `version_name`
+        and `version_description` parameters.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for the Dataset.
+
+        file : core.File
+            See core.File for more documentation.
+
+        version_id : typing.Optional[str]
+            ID of the specific Dataset version to base the created Version on.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed Version to base the created Version on.
+
+        version_name : typing.Optional[str]
+            Name for the new Dataset version.
+
+        version_description : typing.Optional[str]
+            Description for the new Dataset version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[DatasetResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"datasets/{jsonable_encoder(id)}/datapoints/csv",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            data={
+                "version_name": version_name,
+                "version_description": version_description,
+            },
+            files={
+                "file": file,
+            },
+            # No explicit content-type header for this endpoint: httpx must
+            # generate the multipart/form-data header itself so that it carries
+            # the boundary parameter. Hard-coding "multipart/form-data" here
+            # would drop the boundary and make the upload unparseable.
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    DatasetResponse,
+                    construct_type(
+                        type_=DatasetResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[DatasetResponse]:
+        """
+        Deploy Dataset to Environment.
+
+        Set the deployed version for the specified Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Dataset.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Dataset.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Dataset from Environment. + + Remove the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/datasets/types/__init__.py b/src/humanloop/datasets/types/__init__.py new file mode 100644 index 00000000..419263e1 --- /dev/null +++ b/src/humanloop/datasets/types/__init__.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .list_versions_datasets_id_versions_get_request_include_datapoints import ( + ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, +) + +__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"] diff --git a/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py b/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py new file mode 100644 index 00000000..6c04f917 --- /dev/null +++ b/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints = typing.Union[ + typing.Literal["latest_committed", "latest_saved"], typing.Any +] diff --git a/src/humanloop/directories/__init__.py b/src/humanloop/directories/__init__.py new file mode 100644 index 00000000..5cde0202 --- /dev/null +++ b/src/humanloop/directories/__init__.py @@ -0,0 +1,4 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + diff --git a/src/humanloop/directories/client.py b/src/humanloop/directories/client.py new file mode 100644 index 00000000..62972278 --- /dev/null +++ b/src/humanloop/directories/client.py @@ -0,0 +1,385 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.directory_response import DirectoryResponse +from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse +from .raw_client import AsyncRawDirectoriesClient, RawDirectoriesClient + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
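+
+# Illustrative usage sketch (not produced by the generator). It assumes a
+# valid API key and relies only on the method signatures documented below:
+# `create()` treats `path` as including the directory name and creates any
+# missing intermediate directories, and the returned DirectoryResponse is
+# assumed to expose an `id` attribute.
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     directory = client.directories.create(path="examples/datasets")
+#     # Rename the directory, then fetch it with its parents and children.
+#     client.directories.update(id=directory.id, name="renamed-datasets")
+#     detail = client.directories.get(id=directory.id)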
+ + +class DirectoriesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawDirectoriesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawDirectoriesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawDirectoriesClient + """ + return self._raw_client + + def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[DirectoryResponse]: + """ + Retrieve a list of all Directories. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[DirectoryResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.directories.list() + """ + _response = self._raw_client.list(request_options=request_options) + return _response.data + + def create( + self, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DirectoryResponse: + """ + Creates a Directory. + + Parameters + ---------- + name : typing.Optional[str] + Name of the directory to create. + + parent_id : typing.Optional[str] + ID of the parent directory. Starts with `dir_`. + + path : typing.Optional[str] + Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DirectoryResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.directories.create() + """ + _response = self._raw_client.create(name=name, parent_id=parent_id, path=path, request_options=request_options) + return _response.data + + def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DirectoryWithParentsAndChildrenResponse: + """ + Fetches a directory by ID. + + Parameters + ---------- + id : str + String ID of directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DirectoryWithParentsAndChildrenResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.directories.get(id='id', ) + """ + _response = self._raw_client.get(id, request_options=request_options) + return _response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Directory with the given ID. + + The Directory must be empty (i.e. contain no Directories or Files). + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.directories.delete(id='id', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def update( + self, + id: str, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DirectoryResponse: + """ + Update the Directory with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + name : typing.Optional[str] + Name to set for the directory. + + parent_id : typing.Optional[str] + ID of the parent directory. Specify this to move directories. Starts with `dir_`. + + path : typing.Optional[str] + Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DirectoryResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.directories.update(id='id', ) + """ + _response = self._raw_client.update( + id, name=name, parent_id=parent_id, path=path, request_options=request_options + ) + return _response.data + + +class AsyncDirectoriesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawDirectoriesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawDirectoriesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawDirectoriesClient + """ + return self._raw_client + + async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[DirectoryResponse]: + """ + Retrieve a list of all Directories. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[DirectoryResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.directories.list() + asyncio.run(main()) + """ + _response = await self._raw_client.list(request_options=request_options) + return _response.data + + async def create( + self, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DirectoryResponse: + """ + Creates a Directory. + + Parameters + ---------- + name : typing.Optional[str] + Name of the directory to create. + + parent_id : typing.Optional[str] + ID of the parent directory. Starts with `dir_`. + + path : typing.Optional[str] + Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DirectoryResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.directories.create() + asyncio.run(main()) + """ + _response = await self._raw_client.create( + name=name, parent_id=parent_id, path=path, request_options=request_options + ) + return _response.data + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DirectoryWithParentsAndChildrenResponse: + """ + Fetches a directory by ID. + + Parameters + ---------- + id : str + String ID of directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DirectoryWithParentsAndChildrenResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.directories.get(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get(id, request_options=request_options) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Directory with the given ID. + + The Directory must be empty (i.e. contain no Directories or Files). + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.directories.delete(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def update( + self, + id: str, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DirectoryResponse: + """ + Update the Directory with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + name : typing.Optional[str] + Name to set for the directory. + + parent_id : typing.Optional[str] + ID of the parent directory. Specify this to move directories. Starts with `dir_`. + + path : typing.Optional[str] + Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DirectoryResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.directories.update(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.update( + id, name=name, parent_id=parent_id, path=path, request_options=request_options + ) + return _response.data diff --git a/src/humanloop/directories/raw_client.py b/src/humanloop/directories/raw_client.py new file mode 100644 index 00000000..e2f10091 --- /dev/null +++ b/src/humanloop/directories/raw_client.py @@ -0,0 +1,596 @@ +# This file was auto-generated by Fern from our API Definition. 
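+#
+# Illustrative sketch (not generated output) of how this raw client is
+# reached from the public client. Raw-client methods return
+# HttpResponse/AsyncHttpResponse wrappers whose `.data` attribute holds the
+# typed result (client.py unwraps it via `return _response.data`); callers
+# who need the wrapper itself can go through `with_raw_response`:
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     raw = client.directories.with_raw_response.list()
+#     directories = raw.data  # typing.List[DirectoryResponse]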
+ +import typing +from json.decoder import JSONDecodeError + +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.request_options import RequestOptions +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.directory_response import DirectoryResponse +from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse +from ..types.http_validation_error import HttpValidationError + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawDirectoriesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[DirectoryResponse]]: + """ + Retrieve a list of all Directories. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[DirectoryResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "directories", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[DirectoryResponse], + construct_type( + type_=typing.List[DirectoryResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def create( + self, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DirectoryResponse]: + """ + Creates a Directory. + + Parameters + ---------- + name : typing.Optional[str] + Name of the directory to create. + + parent_id : typing.Optional[str] + ID of the parent directory. Starts with `dir_`. + + path : typing.Optional[str] + Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DirectoryResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "directories", + method="POST", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[DirectoryWithParentsAndChildrenResponse]: + """ + Fetches a directory by ID. + + Parameters + ---------- + id : str + String ID of directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DirectoryWithParentsAndChildrenResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryWithParentsAndChildrenResponse, + construct_type( + type_=DirectoryWithParentsAndChildrenResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Directory with the given ID. + + The Directory must be empty (i.e. contain no Directories or Files). + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update( + self, + id: str, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DirectoryResponse]: + """ + Update the Directory with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + name : typing.Optional[str] + Name to set for the directory. + + parent_id : typing.Optional[str] + ID of the parent directory. Specify this to move directories. Starts with `dir_`. + + path : typing.Optional[str] + Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DirectoryResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="PATCH", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawDirectoriesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[DirectoryResponse]]: + """ + Retrieve a list of all Directories. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[DirectoryResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "directories", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[DirectoryResponse], + construct_type( + type_=typing.List[DirectoryResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def create( + self, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DirectoryResponse]: + """ + Creates a Directory. + + Parameters + ---------- + name : typing.Optional[str] + Name of the directory to create. + + parent_id : typing.Optional[str] + ID of the parent directory. Starts with `dir_`. + + path : typing.Optional[str] + Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DirectoryResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "directories", + method="POST", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[DirectoryWithParentsAndChildrenResponse]: + """ + Fetches a directory by ID. + + Parameters + ---------- + id : str + String ID of directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DirectoryWithParentsAndChildrenResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryWithParentsAndChildrenResponse, + construct_type( + type_=DirectoryWithParentsAndChildrenResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Directory with the given ID. + + The Directory must be empty (i.e. contain no Directories or Files). + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update( + self, + id: str, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DirectoryResponse]: + """ + Update the Directory with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + name : typing.Optional[str] + Name to set for the directory. + + parent_id : typing.Optional[str] + ID of the parent directory. Specify this to move directories. Starts with `dir_`. + + path : typing.Optional[str] + Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DirectoryResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="PATCH", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/environment.py b/src/humanloop/environment.py new file mode 100644 index 00000000..b9263608 --- /dev/null +++ b/src/humanloop/environment.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import enum + + +class HumanloopEnvironment(enum.Enum): + DEFAULT = "https://api.humanloop.com/v5" diff --git a/src/humanloop/errors/__init__.py b/src/humanloop/errors/__init__.py new file mode 100644 index 00000000..67183e01 --- /dev/null +++ b/src/humanloop/errors/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .unprocessable_entity_error import UnprocessableEntityError + +__all__ = ["UnprocessableEntityError"] diff --git a/src/humanloop/errors/unprocessable_entity_error.py b/src/humanloop/errors/unprocessable_entity_error.py new file mode 100644 index 00000000..d3f9c5d8 --- /dev/null +++ b/src/humanloop/errors/unprocessable_entity_error.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..core.api_error import ApiError +from ..types.http_validation_error import HttpValidationError + + +class UnprocessableEntityError(ApiError): + def __init__(self, body: HttpValidationError, headers: typing.Optional[typing.Dict[str, str]] = None): + super().__init__(status_code=422, headers=headers, body=body) diff --git a/src/humanloop/evaluations/__init__.py b/src/humanloop/evaluations/__init__.py new file mode 100644 index 00000000..3498bb70 --- /dev/null +++ b/src/humanloop/evaluations/__init__.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. 
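+
+# Illustrative note (not generated): the `*Params` names re-exported below
+# are request-side typed dictionaries, so a plain dict of the right shape can
+# be passed wherever they are accepted, matching the dict-literal style used
+# in the docstring examples in client.py:
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     client.evaluations.create(evaluators=[{'version_id': 'version_id'}])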
+ +# isort: skip_file + +from .types import ( + AddEvaluatorsRequestEvaluatorsItem, + CreateEvaluationRequestEvaluatorsItem, + CreateRunRequestDataset, + CreateRunRequestVersion, +) +from .requests import ( + AddEvaluatorsRequestEvaluatorsItemParams, + CreateEvaluationRequestEvaluatorsItemParams, + CreateRunRequestDatasetParams, + CreateRunRequestVersionParams, +) + +__all__ = [ + "AddEvaluatorsRequestEvaluatorsItem", + "AddEvaluatorsRequestEvaluatorsItemParams", + "CreateEvaluationRequestEvaluatorsItem", + "CreateEvaluationRequestEvaluatorsItemParams", + "CreateRunRequestDataset", + "CreateRunRequestDatasetParams", + "CreateRunRequestVersion", + "CreateRunRequestVersionParams", +] diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py new file mode 100644 index 00000000..006fb99b --- /dev/null +++ b/src/humanloop/evaluations/client.py @@ -0,0 +1,1177 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..requests.file_request import FileRequestParams +from ..types.evaluation_response import EvaluationResponse +from ..types.evaluation_run_response import EvaluationRunResponse +from ..types.evaluation_runs_response import EvaluationRunsResponse +from ..types.evaluation_stats import EvaluationStats +from ..types.evaluation_status import EvaluationStatus +from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse +from .raw_client import AsyncRawEvaluationsClient, RawEvaluationsClient +from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams +from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams +from .requests.create_run_request_dataset import CreateRunRequestDatasetParams +from .requests.create_run_request_version import CreateRunRequestVersionParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class EvaluationsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawEvaluationsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawEvaluationsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawEvaluationsClient + """ + return self._raw_client + + def list( + self, + *, + file_id: str, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[EvaluationResponse]: + """ + Retrieve a list of Evaluations for the specified File. + + Parameters + ---------- + file_id : str + Filter by File ID. Only Evaluations for the specified File will be returned. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Evaluations to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        SyncPager[EvaluationResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+        """
+        return self._raw_client.list(file_id=file_id, page=page, size=size, request_options=request_options)
+
+    def create(
+        self,
+        *,
+        evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams],
+        file: typing.Optional[FileRequestParams] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluationResponse:
+        """
+        Create an Evaluation.
+
+        Create a new Evaluation by specifying the File to evaluate, and a name
+        for the Evaluation.
+        You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint.
+
+        Parameters
+        ----------
+        evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams]
+            The Evaluators used to evaluate.
+
+        file : typing.Optional[FileRequestParams]
+            The File to associate with the Evaluation. This File contains the Logs you're evaluating.
+
+        name : typing.Optional[str]
+            Name of the Evaluation to help identify it. Must be unique within the associated File.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluationResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.evaluations.create(evaluators=[{'version_id': 'version_id'}], )
+        """
+        _response = self._raw_client.create(
+            evaluators=evaluators, file=file, name=name, request_options=request_options
+        )
+        return _response.data
+
+    def add_evaluators(
+        self,
+        id: str,
+        *,
+        evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams],
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluationResponse:
+        """
+        Add Evaluators to an Evaluation.
+
+        The Evaluators will be run on the Logs generated for the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Evaluation.
+
+        evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams]
+            The Evaluators to add to this Evaluation.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluationResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], )
+        """
+        _response = self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options)
+        return _response.data
+
+    def remove_evaluator(
+        self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> EvaluationResponse:
+        """
+        Remove an Evaluator from an Evaluation.
+
+        The Evaluator will no longer be run on the Logs in the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Evaluation.
+
+        evaluator_version_id : str
+            Unique identifier for Evaluator Version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + EvaluationResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', ) + """ + _response = self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options) + return _response.data + + def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse: + """ + Get an Evaluation. + + This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, + such as its name. + + To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. + To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.get(id='ev_567yza', ) + """ + _response = self._raw_client.get(id, request_options=request_options) + return _response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete an Evaluation. + + The Runs and Evaluators in the Evaluation will not be deleted. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.delete(id='ev_567yza', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def list_runs_for_evaluation( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> EvaluationRunsResponse: + """ + List all Runs for an Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationRunsResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.list_runs_for_evaluation(id='id', ) + """ + _response = self._raw_client.list_runs_for_evaluation(id, request_options=request_options) + return _response.data + + def create_run( + self, + id: str, + *, + dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, + version: typing.Optional[CreateRunRequestVersionParams] = OMIT, + orchestrated: typing.Optional[bool] = OMIT, + use_existing_logs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluationRunResponse: + """ + Create an Evaluation Run. + + Optionally specify the Dataset and version to be evaluated. + + Humanloop will automatically start generating Logs and running Evaluators where + `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` + and then generate and submit the required Logs via the API. + + If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, + avoiding generating new Logs unnecessarily. 
Logs that are associated with the specified Version and have `source_datapoint_id` + referencing a datapoint in the specified Dataset will be associated with the Run. + + To keep updated on the progress of the Run, you can poll the Run using + the `GET /evaluations/{id}/runs` endpoint and check its status. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + dataset : typing.Optional[CreateRunRequestDatasetParams] + Dataset to use in this Run. + + version : typing.Optional[CreateRunRequestVersionParams] + Version to use in this Run. + + orchestrated : typing.Optional[bool] + Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + + use_existing_logs : typing.Optional[bool] + If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationRunResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.create_run(id='id', ) + """ + _response = self._raw_client.create_run( + id, + dataset=dataset, + version=version, + orchestrated=orchestrated, + use_existing_logs=use_existing_logs, + request_options=request_options, + ) + return _response.data + + def add_existing_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Add an existing Run to the specified Evaluation. + + This is useful if you want to compare the Runs in this Evaluation with an existing Run + that exists within another Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.add_existing_run(id='id', run_id='run_id', ) + """ + _response = self._raw_client.add_existing_run(id, run_id, request_options=request_options) + return _response.data + + def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Remove a Run from an Evaluation. + + The Logs and Versions used in the Run will not be deleted. + If this Run is used in any other Evaluations, it will still be available in those Evaluations. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.remove_run(id='id', run_id='run_id', ) + """ + _response = self._raw_client.remove_run(id, run_id, request_options=request_options) + return _response.data + + def update_evaluation_run( + self, + id: str, + run_id: str, + *, + control: typing.Optional[bool] = OMIT, + status: typing.Optional[EvaluationStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluationRunResponse: + """ + Update an Evaluation Run. + + Specify `control=true` to use this Run as the control Run for the Evaluation. + You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + control : typing.Optional[bool] + If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. + + status : typing.Optional[EvaluationStatus] + Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationRunResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.update_evaluation_run(id='id', run_id='run_id', ) + """ + _response = self._raw_client.update_evaluation_run( + id, run_id, control=control, status=status, request_options=request_options + ) + return _response.data + + def add_logs_to_run( + self, + id: str, + run_id: str, + *, + log_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluationRunResponse: + """ + Add the specified Logs to a Run. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + log_ids : typing.Sequence[str] + The IDs of the Logs to add to the Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationRunResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], ) + """ + _response = self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options) + return _response.data + + def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats: + """ + Get Evaluation Stats. + + Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the + corresponding Evaluator statistics (such as the mean and percentiles). + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        EvaluationStats
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.evaluations.get_stats(id='id', )
+        """
+        _response = self._raw_client.get_stats(id, request_options=request_options)
+        return _response.data
+
+    def get_logs(
+        self,
+        id: str,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PaginatedDataEvaluationLogResponse:
+        """
+        Get the Logs associated with a specific Evaluation.
+
+        This returns the Logs associated with all Runs within the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            String ID of evaluation. Starts with `ev_` or `evr_`.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            Filter by Run IDs. Only Logs for the specified Runs will be returned.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PaginatedDataEvaluationLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.evaluations.get_logs(id='id', )
+        """
+        _response = self._raw_client.get_logs(id, page=page, size=size, run_id=run_id, request_options=request_options)
+        return _response.data
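+
+
+# A minimal pagination sketch for the synchronous client (illustrative only,
+# mirroring the docstring examples above; the File ID is a placeholder):
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     pager = client.evaluations.list(file_id="pr_30gco7dx6JDq4200GVOHa", size=50)
+#     for evaluation in pager:          # iterate EvaluationResponse items
+#         print(evaluation.id)
+#     # alternatively, iterate page-by-page via pager.iter_pages()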
+
+
+class AsyncEvaluationsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._raw_client = AsyncRawEvaluationsClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> AsyncRawEvaluationsClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        AsyncRawEvaluationsClient
+        """
+        return self._raw_client
+
+    async def list(
+        self,
+        *,
+        file_id: str,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncPager[EvaluationResponse]:
+        """
+        Retrieve a list of Evaluations for the specified File.
+
+        Parameters
+        ----------
+        file_id : str
+            Filter by File ID. Only Evaluations for the specified File will be returned.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Evaluations to fetch.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncPager[EvaluationResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list(file_id=file_id, page=page, size=size, request_options=request_options)
+
+    async def create(
+        self,
+        *,
+        evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams],
+        file: typing.Optional[FileRequestParams] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluationResponse:
+        """
+        Create an Evaluation.
+
+        Create a new Evaluation by specifying the File to evaluate, and a name
+        for the Evaluation.
+        You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint.
+
+        Parameters
+        ----------
+        evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams]
+            The Evaluators used to evaluate.
+
+        file : typing.Optional[FileRequestParams]
+            The File to associate with the Evaluation. This File contains the Logs you're evaluating.
+
+        name : typing.Optional[str]
+            Name of the Evaluation to help identify it. Must be unique within the associated File.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluationResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.evaluations.create(evaluators=[{'version_id': 'version_id'}], )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.create(
+            evaluators=evaluators, file=file, name=name, request_options=request_options
+        )
+        return _response.data
+
+    async def add_evaluators(
+        self,
+        id: str,
+        *,
+        evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams],
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluationResponse:
+        """
+        Add Evaluators to an Evaluation.
+
+        The Evaluators will be run on the Logs generated for the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Evaluation.
+
+        evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams]
+            The Evaluators to add to this Evaluation.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluationResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options)
+        return _response.data
+
+    async def remove_evaluator(
+        self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> EvaluationResponse:
+        """
+        Remove an Evaluator from an Evaluation.
+
+        The Evaluator will no longer be run on the Logs in the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Evaluation.
+ + evaluator_version_id : str + Unique identifier for Evaluator Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options) + return _response.data + + async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse: + """ + Get an Evaluation. + + This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, + such as its name. + + To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. + To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.get(id='ev_567yza', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get(id, request_options=request_options) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete an Evaluation. + + The Runs and Evaluators in the Evaluation will not be deleted. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.delete(id='ev_567yza', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def list_runs_for_evaluation( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> EvaluationRunsResponse: + """ + List all Runs for an Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluationRunsResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.list_runs_for_evaluation(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_runs_for_evaluation(id, request_options=request_options) + return _response.data + + async def create_run( + self, + id: str, + *, + dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, + version: typing.Optional[CreateRunRequestVersionParams] = OMIT, + orchestrated: typing.Optional[bool] = OMIT, + use_existing_logs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluationRunResponse: + """ + Create an Evaluation Run. + + Optionally specify the Dataset and version to be evaluated. + + Humanloop will automatically start generating Logs and running Evaluators where + `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` + and then generate and submit the required Logs via the API. + + If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, + avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` + referencing a datapoint in the specified Dataset will be associated with the Run. + + To keep updated on the progress of the Run, you can poll the Run using + the `GET /evaluations/{id}/runs` endpoint and check its status. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + dataset : typing.Optional[CreateRunRequestDatasetParams] + Dataset to use in this Run. + + version : typing.Optional[CreateRunRequestVersionParams] + Version to use in this Run. + + orchestrated : typing.Optional[bool] + Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + + use_existing_logs : typing.Optional[bool] + If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationRunResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.create_run(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.create_run( + id, + dataset=dataset, + version=version, + orchestrated=orchestrated, + use_existing_logs=use_existing_logs, + request_options=request_options, + ) + return _response.data + + async def add_existing_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Add an existing Run to the specified Evaluation. + + This is useful if you want to compare the Runs in this Evaluation with an existing Run + that exists within another Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.add_existing_run(id='id', run_id='run_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.add_existing_run(id, run_id, request_options=request_options) + return _response.data + + async def remove_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove a Run from an Evaluation. + + The Logs and Versions used in the Run will not be deleted. + If this Run is used in any other Evaluations, it will still be available in those Evaluations. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.remove_run(id='id', run_id='run_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_run(id, run_id, request_options=request_options) + return _response.data + + async def update_evaluation_run( + self, + id: str, + run_id: str, + *, + control: typing.Optional[bool] = OMIT, + status: typing.Optional[EvaluationStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluationRunResponse: + """ + Update an Evaluation Run. + + Specify `control=true` to use this Run as the control Run for the Evaluation. + You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + control : typing.Optional[bool] + If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. + + status : typing.Optional[EvaluationStatus] + Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluationRunResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluations.update_evaluation_run(id='id', run_id='run_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_evaluation_run( + id, run_id, control=control, status=status, request_options=request_options + ) + return _response.data + + async def add_logs_to_run( + self, + id: str, + run_id: str, + *, + log_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluationRunResponse: + """ + Add the specified Logs to a Run. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + log_ids : typing.Sequence[str] + The IDs of the Logs to add to the Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        EvaluationRunResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options)
+        return _response.data
+
+    async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
+        """
+        Get Evaluation Stats.
+
+        Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the
+        corresponding Evaluator statistics (such as the mean and percentiles).
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Evaluation.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluationStats
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.evaluations.get_stats(id='id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.get_stats(id, request_options=request_options)
+        return _response.data
+
+    async def get_logs(
+        self,
+        id: str,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PaginatedDataEvaluationLogResponse:
+        """
+        Get the Logs associated with a specific Evaluation.
+
+        This returns the Logs associated with all Runs within the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            String ID of evaluation. Starts with `ev_` or `evr_`.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            Filter by Run IDs. Only Logs for the specified Runs will be returned.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PaginatedDataEvaluationLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.evaluations.get_logs(id='id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.get_logs(
+            id, page=page, size=size, run_id=run_id, request_options=request_options
+        )
+        return _response.data
diff --git a/src/humanloop/evaluations/raw_client.py b/src/humanloop/evaluations/raw_client.py
new file mode 100644
index 00000000..85c3dbf3
--- /dev/null
+++ b/src/humanloop/evaluations/raw_client.py
@@ -0,0 +1,1845 @@
+# This file was auto-generated by Fern from our API Definition.
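+#
+# The Raw*Client classes below return HttpResponse / AsyncHttpResponse wrappers
+# rather than bare models; the wrapper clients in client.py unwrap `.data` for
+# convenience. A minimal sketch of the difference (illustrative only; the
+# Evaluation ID is a placeholder taken from the docstring examples):
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     evaluation = client.evaluations.get(id="ev_567yza")             # parsed EvaluationResponse
+#     raw = client.evaluations.with_raw_response.get(id="ev_567yza")  # HttpResponse[EvaluationResponse]
+#     evaluation = raw.data                                           # the same parsed model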
+ +import typing +from json.decoder import JSONDecodeError + +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager +from ..core.request_options import RequestOptions +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..requests.file_request import FileRequestParams +from ..types.evaluation_response import EvaluationResponse +from ..types.evaluation_run_response import EvaluationRunResponse +from ..types.evaluation_runs_response import EvaluationRunsResponse +from ..types.evaluation_stats import EvaluationStats +from ..types.evaluation_status import EvaluationStatus +from ..types.http_validation_error import HttpValidationError +from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse +from ..types.paginated_evaluation_response import PaginatedEvaluationResponse +from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams +from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams +from .requests.create_run_request_dataset import CreateRunRequestDatasetParams +from .requests.create_run_request_version import CreateRunRequestVersionParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawEvaluationsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, + *, + file_id: str, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[EvaluationResponse]: + """ + Retrieve a list of Evaluations for the specified File. + + Parameters + ---------- + file_id : str + Filter by File ID. Only Evaluations for the specified File will be returned. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Evaluations to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SyncPager[EvaluationResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + "evaluations", + method="GET", + params={ + "file_id": file_id, + "page": page, + "size": size, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedEvaluationResponse, + construct_type( + type_=PaginatedEvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list( + file_id=file_id, + page=page + 1, + size=size, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def create( + self, + *, + evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + file: typing.Optional[FileRequestParams] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationResponse]: + """ + Create an Evaluation. + + Create a new Evaluation by specifying the File to evaluate, and a name + for the Evaluation. + You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. + + Parameters + ---------- + evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] + The Evaluators used to evaluate. + + file : typing.Optional[FileRequestParams] + The File to associate with the Evaluation. This File contains the Logs you're evaluating. + + name : typing.Optional[str] + Name of the Evaluation to help identify it. Must be unique within the associated File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "evaluations", + method="POST", + json={ + "file": convert_and_respect_annotation_metadata( + object_=file, annotation=FileRequestParams, direction="write" + ), + "name": name, + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def add_evaluators( + self, + id: str, + *, + evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationResponse]: + """ + Add Evaluators to an Evaluation. + + The Evaluators will be run on the Logs generated for the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] + The Evaluators to add to this Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_evaluator( + self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationResponse]: + """ + Remove an Evaluator from an Evaluation. 
+ + The Evaluator will no longer be run on the Logs in the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluator_version_id : str + Unique identifier for Evaluator Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationResponse]: + """ + Get an Evaluation. + + This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, + such as its name. + + To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. + To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete an Evaluation. + + The Runs and Evaluators in the Evaluation will not be deleted. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_runs_for_evaluation( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationRunsResponse]: + """ + List all Runs for an Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationRunsResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunsResponse, + construct_type( + type_=EvaluationRunsResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def create_run( + self, + id: str, + *, + dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, + version: typing.Optional[CreateRunRequestVersionParams] = OMIT, + orchestrated: typing.Optional[bool] = OMIT, + use_existing_logs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationRunResponse]: + """ + Create an Evaluation Run. + + Optionally specify the Dataset and version to be evaluated. + + Humanloop will automatically start generating Logs and running Evaluators where + `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` + and then generate and submit the required Logs via the API. + + If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, + avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` + referencing a datapoint in the specified Dataset will be associated with the Run. + + To keep updated on the progress of the Run, you can poll the Run using + the `GET /evaluations/{id}/runs` endpoint and check its status. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + dataset : typing.Optional[CreateRunRequestDatasetParams] + Dataset to use in this Run. 
+ + version : typing.Optional[CreateRunRequestVersionParams] + Version to use in this Run. + + orchestrated : typing.Optional[bool] + Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + + use_existing_logs : typing.Optional[bool] + If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="POST", + json={ + "dataset": convert_and_respect_annotation_metadata( + object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" + ), + "version": convert_and_respect_annotation_metadata( + object_=version, annotation=CreateRunRequestVersionParams, direction="write" + ), + "orchestrated": orchestrated, + "use_existing_logs": use_existing_logs, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def add_existing_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.Optional[typing.Any]]: + """ + Add an existing Run to the specified Evaluation. + + This is useful if you want to compare the Runs in this Evaluation with an existing Run + that exists within another Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[typing.Optional[typing.Any]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove a Run from an Evaluation. + + The Logs and Versions used in the Run will not be deleted. + If this Run is used in any other Evaluations, it will still be available in those Evaluations. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_evaluation_run( + self, + id: str, + run_id: str, + *, + control: typing.Optional[bool] = OMIT, + status: typing.Optional[EvaluationStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationRunResponse]: + """ + Update an Evaluation Run. + + Specify `control=true` to use this Run as the control Run for the Evaluation. + You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + control : typing.Optional[bool] + If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. + + status : typing.Optional[EvaluationStatus] + Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="PATCH", + json={ + "control": control, + "status": status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def add_logs_to_run( + self, + id: str, + run_id: str, + *, + log_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationRunResponse]: + """ + Add the specified Logs to a Run. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + log_ids : typing.Sequence[str] + The IDs of the Logs to add to the Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", + method="POST", + json={ + "log_ids": log_ids, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get_stats( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationStats]: + """ + Get Evaluation Stats. + + Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the + corresponding Evaluator statistics (such as the mean and percentiles). + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        HttpResponse[EvaluationStats]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"evaluations/{jsonable_encoder(id)}/stats",
+            method="GET",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    EvaluationStats,
+                    construct_type(
+                        type_=EvaluationStats,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    def get_logs(
+        self,
+        id: str,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[PaginatedDataEvaluationLogResponse]:
+        """
+        Get the Logs associated with a specific Evaluation.
+
+        This returns the Logs associated with all Runs within the Evaluation.
+
+        Parameters
+        ----------
+        id : str
+            String ID of evaluation. Starts with `ev_` or `evr_`.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            Filter by Run IDs. Only Logs for the specified Runs will be returned.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[PaginatedDataEvaluationLogResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"evaluations/{jsonable_encoder(id)}/logs",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "run_id": run_id,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    PaginatedDataEvaluationLogResponse,
+                    construct_type(
+                        type_=PaginatedDataEvaluationLogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+class AsyncRawEvaluationsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list(
+        self,
+        *,
+        file_id: str,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncPager[EvaluationResponse]:
+        """
+        Retrieve a list of Evaluations for the specified File.
+
+        Parameters
+        ----------
+        file_id : str
+            Filter by File ID.
Only Evaluations for the specified File will be returned. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Evaluations to fetch. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncPager[EvaluationResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + "evaluations", + method="GET", + params={ + "file_id": file_id, + "page": page, + "size": size, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedEvaluationResponse, + construct_type( + type_=PaginatedEvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list( + file_id=file_id, + page=page + 1, + size=size, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def create( + self, + *, + evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + file: typing.Optional[FileRequestParams] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Create an Evaluation. + + Create a new Evaluation by specifying the File to evaluate, and a name + for the Evaluation. + You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. + + Parameters + ---------- + evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] + The Evaluators used to evaluate. + + file : typing.Optional[FileRequestParams] + The File to associate with the Evaluation. This File contains the Logs you're evaluating. + + name : typing.Optional[str] + Name of the Evaluation to help identify it. Must be unique within the associated File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations", + method="POST", + json={ + "file": convert_and_respect_annotation_metadata( + object_=file, annotation=FileRequestParams, direction="write" + ), + "name": name, + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def add_evaluators( + self, + id: str, + *, + evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Add Evaluators to an Evaluation. + + The Evaluators will be run on the Logs generated for the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] + The Evaluators to add to this Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_evaluator( + self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Remove an Evaluator from an Evaluation. 
+ + The Evaluator will no longer be run on the Logs in the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluator_version_id : str + Unique identifier for Evaluator Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Get an Evaluation. + + This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, + such as its name. + + To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. + To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete an Evaluation. + + The Runs and Evaluators in the Evaluation will not be deleted. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_runs_for_evaluation( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationRunsResponse]: + """ + List all Runs for an Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunsResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunsResponse, + construct_type( + type_=EvaluationRunsResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def create_run( + self, + id: str, + *, + dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, + version: typing.Optional[CreateRunRequestVersionParams] = OMIT, + orchestrated: typing.Optional[bool] = OMIT, + use_existing_logs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationRunResponse]: + """ + Create an Evaluation Run. + + Optionally specify the Dataset and version to be evaluated. + + Humanloop will automatically start generating Logs and running Evaluators where + `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` + and then generate and submit the required Logs via the API. + + If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, + avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` + referencing a datapoint in the specified Dataset will be associated with the Run. + + To keep updated on the progress of the Run, you can poll the Run using + the `GET /evaluations/{id}/runs` endpoint and check its status. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. 
+ + dataset : typing.Optional[CreateRunRequestDatasetParams] + Dataset to use in this Run. + + version : typing.Optional[CreateRunRequestVersionParams] + Version to use in this Run. + + orchestrated : typing.Optional[bool] + Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + + use_existing_logs : typing.Optional[bool] + If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="POST", + json={ + "dataset": convert_and_respect_annotation_metadata( + object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" + ), + "version": convert_and_respect_annotation_metadata( + object_=version, annotation=CreateRunRequestVersionParams, direction="write" + ), + "orchestrated": orchestrated, + "use_existing_logs": use_existing_logs, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def add_existing_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.Optional[typing.Any]]: + """ + Add an existing Run to the specified Evaluation. + + This is useful if you want to compare the Runs in this Evaluation with an existing Run + that exists within another Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.Optional[typing.Any]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove a Run from an Evaluation. + + The Logs and Versions used in the Run will not be deleted. + If this Run is used in any other Evaluations, it will still be available in those Evaluations. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_evaluation_run( + self, + id: str, + run_id: str, + *, + control: typing.Optional[bool] = OMIT, + status: typing.Optional[EvaluationStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationRunResponse]: + """ + Update an Evaluation Run. + + Specify `control=true` to use this Run as the control Run for the Evaluation. + You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + control : typing.Optional[bool] + If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. + + status : typing.Optional[EvaluationStatus] + Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="PATCH", + json={ + "control": control, + "status": status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def add_logs_to_run( + self, + id: str, + run_id: str, + *, + log_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationRunResponse]: + """ + Add the specified Logs to a Run. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + log_ids : typing.Sequence[str] + The IDs of the Logs to add to the Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", + method="POST", + json={ + "log_ids": log_ids, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get_stats( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationStats]: + """ + Get Evaluation Stats. + + Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the + corresponding Evaluator statistics (such as the mean and percentiles). + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluationStats] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/stats", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationStats, + construct_type( + type_=EvaluationStats, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get_logs( + self, + id: str, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PaginatedDataEvaluationLogResponse]: + """ + Get the Logs associated with a specific Evaluation. + + This returns the Logs associated with all Runs within the Evaluation. + + Parameters + ---------- + id : str + String ID of evaluation. Starts with `ev_` or `evr_`. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Logs to fetch. + + run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Filter by Run IDs. Only Logs for the specified Runs will be returned. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PaginatedDataEvaluationLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/logs", + method="GET", + params={ + "page": page, + "size": size, + "run_id": run_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataEvaluationLogResponse, + construct_type( + type_=PaginatedDataEvaluationLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/evaluations/requests/__init__.py b/src/humanloop/evaluations/requests/__init__.py new file mode 100644 index 00000000..1997f1a0 --- /dev/null +++ b/src/humanloop/evaluations/requests/__init__.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition.
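+# Editor's sketch (not generated code): the `*Params` types exported below are
+# TypedDict-style unions, so they are passed to the client as plain dicts. The
+# key names are assumed to mirror the underlying request modules (`version_id`,
+# `file_id`, `path`), and the IDs and paths shown are illustrative only:
+#
+#   client.evaluations.create(
+#       name="accuracy-eval",
+#       evaluators=[{"path": "Shared Evaluators/Accuracy Evaluator"}],
+#   )
+#   client.evaluations.create_run(
+#       id="ev_123abc",
+#       dataset={"version_id": "dsv_456def"},
+#   )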
+ +# isort: skip_file + +from .add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams +from .create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams +from .create_run_request_dataset import CreateRunRequestDatasetParams +from .create_run_request_version import CreateRunRequestVersionParams + +__all__ = [ + "AddEvaluatorsRequestEvaluatorsItemParams", + "CreateEvaluationRequestEvaluatorsItemParams", + "CreateRunRequestDatasetParams", + "CreateRunRequestVersionParams", +] diff --git a/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py b/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py new file mode 100644 index 00000000..24da1248 --- /dev/null +++ b/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.evaluator_file_id import EvaluatorFileIdParams +from ...requests.evaluator_file_path import EvaluatorFilePathParams +from ...requests.evaluator_version_id import EvaluatorVersionIdParams + +AddEvaluatorsRequestEvaluatorsItemParams = typing.Union[ + EvaluatorVersionIdParams, EvaluatorFileIdParams, EvaluatorFilePathParams +] diff --git a/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py b/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py new file mode 100644 index 00000000..a53624c0 --- /dev/null +++ b/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.evaluator_file_id import EvaluatorFileIdParams +from ...requests.evaluator_file_path import EvaluatorFilePathParams +from ...requests.evaluator_version_id import EvaluatorVersionIdParams + +CreateEvaluationRequestEvaluatorsItemParams = typing.Union[ + EvaluatorVersionIdParams, EvaluatorFileIdParams, EvaluatorFilePathParams +] diff --git a/src/humanloop/evaluations/requests/create_run_request_dataset.py b/src/humanloop/evaluations/requests/create_run_request_dataset.py new file mode 100644 index 00000000..cabeb7f2 --- /dev/null +++ b/src/humanloop/evaluations/requests/create_run_request_dataset.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.file_id import FileIdParams +from ...requests.file_path import FilePathParams +from ...requests.version_id import VersionIdParams + +CreateRunRequestDatasetParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams] diff --git a/src/humanloop/evaluations/requests/create_run_request_version.py b/src/humanloop/evaluations/requests/create_run_request_version.py new file mode 100644 index 00000000..830ee49e --- /dev/null +++ b/src/humanloop/evaluations/requests/create_run_request_version.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
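+# Editor's sketch (not generated code): CreateRunRequestVersionParams is a union
+# of three TypedDict-style shapes, so any one of the following dicts should be
+# accepted for the `version` argument of `evaluations.create_run` (key names
+# assumed from the imported Params types; values illustrative):
+#
+#   {"version_id": "prv_123abc"}          # VersionIdParams
+#   {"file_id": "pr_456def"}              # FileIdParams
+#   {"path": "My Prompts/Support Agent"}  # FilePathParams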
+ +import typing + +from ...requests.file_id import FileIdParams +from ...requests.file_path import FilePathParams +from ...requests.version_id import VersionIdParams + +CreateRunRequestVersionParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams] diff --git a/src/humanloop/evaluations/types/__init__.py b/src/humanloop/evaluations/types/__init__.py new file mode 100644 index 00000000..508249fb --- /dev/null +++ b/src/humanloop/evaluations/types/__init__.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItem +from .create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItem +from .create_run_request_dataset import CreateRunRequestDataset +from .create_run_request_version import CreateRunRequestVersion + +__all__ = [ + "AddEvaluatorsRequestEvaluatorsItem", + "CreateEvaluationRequestEvaluatorsItem", + "CreateRunRequestDataset", + "CreateRunRequestVersion", +] diff --git a/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py b/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py new file mode 100644 index 00000000..3e4bbe23 --- /dev/null +++ b/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.evaluator_file_id import EvaluatorFileId +from ...types.evaluator_file_path import EvaluatorFilePath +from ...types.evaluator_version_id import EvaluatorVersionId + +AddEvaluatorsRequestEvaluatorsItem = typing.Union[EvaluatorVersionId, EvaluatorFileId, EvaluatorFilePath] diff --git a/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py b/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py new file mode 100644 index 00000000..448585eb --- /dev/null +++ b/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.evaluator_file_id import EvaluatorFileId +from ...types.evaluator_file_path import EvaluatorFilePath +from ...types.evaluator_version_id import EvaluatorVersionId + +CreateEvaluationRequestEvaluatorsItem = typing.Union[EvaluatorVersionId, EvaluatorFileId, EvaluatorFilePath] diff --git a/src/humanloop/evaluations/types/create_run_request_dataset.py b/src/humanloop/evaluations/types/create_run_request_dataset.py new file mode 100644 index 00000000..b915987e --- /dev/null +++ b/src/humanloop/evaluations/types/create_run_request_dataset.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.file_id import FileId +from ...types.file_path import FilePath +from ...types.version_id import VersionId + +CreateRunRequestDataset = typing.Union[VersionId, FileId, FilePath] diff --git a/src/humanloop/evaluations/types/create_run_request_version.py b/src/humanloop/evaluations/types/create_run_request_version.py new file mode 100644 index 00000000..6d383dd8 --- /dev/null +++ b/src/humanloop/evaluations/types/create_run_request_version.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
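+# Editor's note (not generated code): this mirrors the requests/ module above,
+# but with the validated model types used when reading responses rather than the
+# TypedDict params. A hedged narrowing sketch, assuming each model exposes the
+# field its name implies:
+#
+#   if isinstance(version, VersionId):
+#       print(version.version_id)
+#   elif isinstance(version, FilePath):
+#       print(version.path)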
+ +import typing + +from ...types.file_id import FileId +from ...types.file_path import FilePath +from ...types.version_id import VersionId + +CreateRunRequestVersion = typing.Union[VersionId, FileId, FilePath] diff --git a/src/humanloop/evaluators/__init__.py b/src/humanloop/evaluators/__init__.py new file mode 100644 index 00000000..480476b3 --- /dev/null +++ b/src/humanloop/evaluators/__init__.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .types import CreateEvaluatorLogRequestJudgment, CreateEvaluatorLogRequestSpec, EvaluatorRequestSpec +from .requests import ( + CreateEvaluatorLogRequestJudgmentParams, + CreateEvaluatorLogRequestSpecParams, + EvaluatorRequestSpecParams, +) + +__all__ = [ + "CreateEvaluatorLogRequestJudgment", + "CreateEvaluatorLogRequestJudgmentParams", + "CreateEvaluatorLogRequestSpec", + "CreateEvaluatorLogRequestSpecParams", + "EvaluatorRequestSpec", + "EvaluatorRequestSpecParams", +] diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py new file mode 100644 index 00000000..69fff10c --- /dev/null +++ b/src/humanloop/evaluators/client.py @@ -0,0 +1,1411 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..requests.chat_message import ChatMessageParams +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse +from ..types.evaluator_response import EvaluatorResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.list_evaluators import ListEvaluators +from ..types.log_status import LogStatus +from ..types.sort_order import SortOrder +from .raw_client import AsyncRawEvaluatorsClient, RawEvaluatorsClient +from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams +from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams +from .requests.evaluator_request_spec import EvaluatorRequestSpecParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class EvaluatorsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawEvaluatorsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawEvaluatorsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + RawEvaluatorsClient + """ + return self._raw_client + + def log( + self, + *, + parent_id: str, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + create_evaluator_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, + marked_completed: typing.Optional[bool] = OMIT, + spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateEvaluatorLogResponse: + """ + Submit Evaluator judgment for an existing Log. + + Creates a new Log. The evaluated Log will be set as the parent of the created Log. + + Parameters + ---------- + parent_id : str + Identifier of the evaluated Log. The newly created Log will have this one set as parent. + + version_id : typing.Optional[str] + ID of the Evaluator version to log against. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Evaluator. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from the LLM. Only populated for LLM Evaluator Logs. + + created_at : typing.Optional[dt.datetime] + User-defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to the provider. Only populated for LLM Evaluator Logs. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received from the provider. Only populated for LLM Evaluator Logs. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from.
+ + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + create_evaluator_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the LLM. Only populated for LLM Evaluator Logs. + + judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] + Evaluator assessment of the Log. + + marked_completed : typing.Optional[bool] + Whether the Log has been manually marked as completed by a user. + + spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateEvaluatorLogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.log(parent_id='parent_id', ) + """ + _response = self._raw_client.log( + parent_id=parent_id, + version_id=version_id, + environment=environment, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + create_evaluator_log_request_environment=create_evaluator_log_request_environment, + save=save, + log_id=log_id, + output_message=output_message, + judgment=judgment, + marked_completed=marked_completed, + spec=spec, + request_options=request_options, + ) + return _response.data + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[EvaluatorResponse]: + """ + Get a list of all Evaluators. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Evaluators to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Evaluator name. 
+ + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Evaluators by. + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SyncPager[EvaluatorResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + response = client.evaluators.list(size=1, ) + for item in response: + print(item) + # alternatively, you can paginate page-by-page + for page in response.iter_pages(): + print(page) + """ + return self._raw_client.list( + page=page, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + + def upsert( + self, + *, + spec: EvaluatorRequestSpecParams, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Create an Evaluator or update it with a new version if it already exists. + + Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Evaluator - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + spec : EvaluatorRequestSpecParams + + path : typing.Optional[str] + Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Evaluator. + + version_name : typing.Optional[str] + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.upsert(path='Shared Evaluators/Accuracy Evaluator', spec={'arguments_type': "target_required", 'return_type': "number", 'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n return 0.5'}, version_name='simple-evaluator', version_description='Simple evaluator that returns 0.5', ) + """ + _response = self._raw_client.upsert( + spec=spec, + path=path, + id=id, + version_name=version_name, + version_description=version_description, + request_options=request_options, + ) + return _response.data + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Retrieve the Evaluator with the given ID. + + By default, the deployed version of the Evaluator is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Evaluator.
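+ + For example, `client.evaluators.get(id='ev_890bcd', environment='production')` retrieves the version deployed to a `production` Environment (the environment name here is illustrative).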
+ + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : typing.Optional[str] + A specific Version ID of the Evaluator to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.get(id='ev_890bcd', ) + """ + _response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Evaluator with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.delete(id='ev_890bcd', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Move the Evaluator to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + path : typing.Optional[str] + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Evaluator, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.move(id='ev_890bcd', path='new directory/new name', ) + """ + _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListEvaluators: + """ + Get a list of all the versions of an Evaluator. + + Parameters + ---------- + id : str + Unique identifier for the Evaluator. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListEvaluators + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.list_versions(id='ev_890bcd', ) + """ + _response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. 
+ + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.delete_evaluator_version(id='id', version_id='version_id', ) + """ + _response = self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options) + return _response.data + + def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Update the name or description of the Evaluator version. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.update_evaluator_version(id='id', version_id='version_id', ) + """ + _response = self._raw_client.update_evaluator_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> EvaluatorResponse: + """ + Deploy Evaluator to an Environment. + + Set the deployed version for the specified Environment. This Evaluator + will be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', ) + """ + _response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Evaluator from the Environment. + + Remove the deployed version for the specified Environment. This Evaluator + will no longer be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', ) + """ + _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.list_environments(id='ev_890bcd', ) + """ + _response = self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Activate and deactivate Evaluators for monitoring the Evaluator. + + An activated Evaluator will automatically be run on all new Logs + within the Evaluator for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.evaluators.update_monitoring(id='id', ) + """ + _response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + +class AsyncEvaluatorsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawEvaluatorsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawEvaluatorsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
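+ + For example, `await client.evaluators.with_raw_response.log(parent_id='parent_id')` returns an `AsyncHttpResponse` whose `.data` attribute holds the parsed `CreateEvaluatorLogResponse`, rather than the bare model returned by `client.evaluators.log` (the argument value is illustrative).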
+ + Returns + ------- + AsyncRawEvaluatorsClient + """ + return self._raw_client + + async def log( + self, + *, + parent_id: str, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + create_evaluator_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, + marked_completed: typing.Optional[bool] = OMIT, + spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateEvaluatorLogResponse: + """ + Submit Evaluator judgment for an existing Log. + + Creates a new Log. The evaluated Log will be set as the parent of the created Log. + + Parameters + ---------- + parent_id : str + Identifier of the evaluated Log. The newly created Log will have this one set as parent. + + version_id : typing.Optional[str] + ID of the Evaluator version to log against. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Evaluator. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from the LLM. Only populated for LLM Evaluator Logs. + + created_at : typing.Optional[dt.datetime] + User-defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to the provider. Only populated for LLM Evaluator Logs. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received from the provider. Only populated for LLM Evaluator Logs. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from.
+ + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + create_evaluator_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the LLM. Only populated for LLM Evaluator Logs. + + judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] + Evaluator assessment of the Log. + + marked_completed : typing.Optional[bool] + Whether the Log has been manually marked as completed by a user. + + spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateEvaluatorLogResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.log(parent_id='parent_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.log( + parent_id=parent_id, + version_id=version_id, + environment=environment, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + create_evaluator_log_request_environment=create_evaluator_log_request_environment, + save=save, + log_id=log_id, + output_message=output_message, + judgment=judgment, + marked_completed=marked_completed, + spec=spec, + request_options=request_options, + ) + return _response.data + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[EvaluatorResponse]: + """ + Get a list of all Evaluators. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. 
Number of Evaluators to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Evaluator name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Evaluators by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncPager[EvaluatorResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.evaluators.list(size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    async def upsert(
+        self,
+        *,
+        spec: EvaluatorRequestSpecParams,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluatorResponse:
+        """
+        Create an Evaluator or update it with a new version if it already exists.
+
+        Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Evaluator - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        spec : EvaluatorRequestSpecParams
+
+        path : typing.Optional[str]
+            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Evaluator.
+
+        version_name : typing.Optional[str]
+            Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
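+
+        Notes
+        -----
+        `spec` is a union of Evaluator spec shapes. A minimal Python Evaluator
+        spec, mirroring the example below, looks like:
+
+            {'arguments_type': "target_required", 'return_type': "number",
+             'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n    return 0.5'}
+
+        Other `evaluator_type` values use their own spec fields; see
+        EvaluatorRequestSpecParams for the accepted shapes.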
+ + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.upsert(path='Shared Evaluators/Accuracy Evaluator', spec={'arguments_type': "target_required", 'return_type': "number", 'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n return 0.5'}, version_name='simple-evaluator', version_description='Simple evaluator that returns 0.5', ) + asyncio.run(main()) + """ + _response = await self._raw_client.upsert( + spec=spec, + path=path, + id=id, + version_name=version_name, + version_description=version_description, + request_options=request_options, + ) + return _response.data + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Retrieve the Evaluator with the given ID. + + By default, the deployed version of the Evaluator is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : typing.Optional[str] + A specific Version ID of the Evaluator to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.get(id='ev_890bcd', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Evaluator with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.delete(id='ev_890bcd', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Move the Evaluator to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + path : typing.Optional[str] + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Evaluator, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.move(id='ev_890bcd', path='new directory/new name', ) + asyncio.run(main()) + """ + _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListEvaluators: + """ + Get a list of all the versions of an Evaluator. + + Parameters + ---------- + id : str + Unique identifier for the Evaluator. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListEvaluators + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.list_versions(id='ev_890bcd', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + async def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.delete_evaluator_version(id='id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options) + return _response.data + + async def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Update the name or description of the Evaluator version. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.update_evaluator_version(id='id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_evaluator_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> EvaluatorResponse: + """ + Deploy Evaluator to an Environment. + + Set the deployed version for the specified Environment. This Evaluator + will be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', ) + asyncio.run(main()) + """ + _response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Evaluator from the Environment. + + Remove the deployed version for the specified Environment. This Evaluator + will no longer be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.list_environments(id='ev_890bcd', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: + """ + Activate and deactivate Evaluators for monitoring the Evaluator. + + An activated Evaluator will automatically be run on all new Logs + within the Evaluator for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EvaluatorResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.evaluators.update_monitoring(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data diff --git a/src/humanloop/evaluators/raw_client.py b/src/humanloop/evaluators/raw_client.py new file mode 100644 index 00000000..8aeb32bc --- /dev/null +++ b/src/humanloop/evaluators/raw_client.py @@ -0,0 +1,2014 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing +from json.decoder import JSONDecodeError + +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager +from ..core.request_options import RequestOptions +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..requests.chat_message import ChatMessageParams +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse +from ..types.evaluator_response import EvaluatorResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.http_validation_error import HttpValidationError +from ..types.list_evaluators import ListEvaluators +from ..types.log_status import LogStatus +from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse +from ..types.sort_order import SortOrder +from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams +from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams +from .requests.evaluator_request_spec import EvaluatorRequestSpecParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
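+
+# Editorial note (a sketch, not generated code): OMIT lets callers distinguish
+# "field not provided" from "field explicitly set to None". Passing omit=OMIT to
+# the underlying HTTP client strips unset fields from the JSON body, e.g.:
+#
+#     client.log(parent_id="log_123")               # "output" key omitted entirely
+#     client.log(parent_id="log_123", output=None)  # sends "output": null
+#
+# ("log_123" is a hypothetical ID used only for illustration.)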
+
+
+class RawEvaluatorsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def log(
+        self,
+        *,
+        parent_id: str,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        create_evaluator_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT,
+        marked_completed: typing.Optional[bool] = OMIT,
+        spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[CreateEvaluatorLogResponse]:
+        """
+        Submit Evaluator judgment for an existing Log.
+
+        Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+
+        Parameters
+        ----------
+        parent_id : str
+            Identifier of the evaluated Log. The newly created Log will have this one set as parent.
+
+        version_id : typing.Optional[str]
+            ID of the Evaluator version to log against.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Evaluator.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider. Only populated for LLM Evaluator Logs.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider. Only populated for LLM Evaluator Logs.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+ + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + create_evaluator_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the LLM. Only populated for LLM Evaluator Logs. + + judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] + Evaluator assessment of the Log. + + marked_completed : typing.Optional[bool] + Whether the Log has been manually marked as completed by a user. + + spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
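+
+        Notes
+        -----
+        Two distinct "environment" values exist on this endpoint: the
+        `environment` keyword is sent as a query parameter (selecting which
+        deployed Evaluator version to log against), while
+        `create_evaluator_log_request_environment` is serialized to the
+        `environment` field of the JSON body (which Environment the Log is
+        associated to). The long prefix exists only to avoid that name
+        collision in the Python signature, e.g. (hypothetical IDs):
+
+            raw_client.log(
+                parent_id="log_123",
+                environment="production",  # query param: version targeting
+                create_evaluator_log_request_environment="production",  # body field
+            )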
+ + Returns + ------- + HttpResponse[CreateEvaluatorLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "parent_id": parent_id, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": create_evaluator_log_request_environment, + "save": save, + "log_id": log_id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "judgment": convert_and_respect_annotation_metadata( + object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" + ), + "marked_completed": marked_completed, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateEvaluatorLogResponse, + construct_type( + type_=CreateEvaluatorLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[EvaluatorResponse]: + """ + Get a list of all Evaluators. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Evaluators to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Evaluator name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Evaluators by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
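+
+        Notes
+        -----
+        Pagination is offset-based: `page` defaults to 1 and the returned
+        SyncPager lazily re-issues this request with `page + 1` via its
+        `get_next` callback. A consumption sketch (assuming SyncPager is
+        directly iterable, as the wrapper client's examples suggest):
+
+            for evaluator in raw_client.list(size=50):
+                print(evaluator.id)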
+
+        Returns
+        -------
+        SyncPager[EvaluatorResponse]
+            Successful Response
+        """
+        page = page if page is not None else 1
+
+        _response = self._client_wrapper.httpx_client.request(
+            "evaluators",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "name": name,
+                "user_filter": user_filter,
+                "sort_by": sort_by,
+                "order": order,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _parsed_response = typing.cast(
+                    PaginatedDataEvaluatorResponse,
+                    construct_type(
+                        type_=PaginatedDataEvaluatorResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                _items = _parsed_response.records
+                _has_next = True
+                _get_next = lambda: self.list(
+                    page=page + 1,
+                    size=size,
+                    name=name,
+                    user_filter=user_filter,
+                    sort_by=sort_by,
+                    order=order,
+                    request_options=request_options,
+                )
+                return SyncPager(
+                    has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    def upsert(
+        self,
+        *,
+        spec: EvaluatorRequestSpecParams,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[EvaluatorResponse]:
+        """
+        Create an Evaluator or update it with a new version if it already exists.
+
+        Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Evaluator - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        spec : EvaluatorRequestSpecParams
+
+        path : typing.Optional[str]
+            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Evaluator.
+
+        version_name : typing.Optional[str]
+            Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators", + method="POST", + json={ + "path": path, + "id": id, + "version_name": version_name, + "version_description": version_description, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Retrieve the Evaluator with the given ID. + + By default, the deployed version of the Evaluator is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : typing.Optional[str] + A specific Version ID of the Evaluator to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Evaluator with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
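+
+        Notes
+        -----
+        As with every raw-client method here, a 422 response raises
+        UnprocessableEntityError carrying the parsed HttpValidationError body,
+        and any other non-2xx status raises ApiError. A defensive-usage sketch:
+
+            try:
+                raw_client.delete("ev_890bcd")
+            except UnprocessableEntityError as exc:
+                print(exc.body)  # validation details from the server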
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Move the Evaluator to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + path : typing.Optional[str] + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Evaluator, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListEvaluators]: + """ + Get a list of all the versions of an Evaluator. + + Parameters + ---------- + id : str + Unique identifier for the Evaluator. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ListEvaluators] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListEvaluators, + construct_type( + type_=ListEvaluators, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Update the name or description of the Evaluator version. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
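+
+        Notes
+        -----
+        Both `name` and `description` default to the OMIT sentinel, so fields
+        you do not pass are dropped from the PATCH body instead of being sent
+        as null; presumably the server then leaves them unchanged (a sketch,
+        assuming that partial-update behavior):
+
+            raw_client.update_evaluator_version("ev_890bcd", "evv_012def", name="v2")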
+ + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluatorResponse]: + """ + Deploy Evaluator to an Environment. + + Set the deployed version for the specified Environment. This Evaluator + will be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Evaluator from the Environment. + + Remove the deployed version for the specified Environment. This Evaluator + will no longer be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Activate and deactivate Evaluators for monitoring the Evaluator. + + An activated Evaluator will automatically be run on all new Logs + within the Evaluator for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawEvaluatorsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + parent_id: str, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + create_evaluator_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, + marked_completed: typing.Optional[bool] = OMIT, + spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateEvaluatorLogResponse]: + """ + Submit Evaluator judgment for an existing Log. + + Creates a new Log. The evaluated Log will be set as the parent of the created Log. + + Parameters + ---------- + parent_id : str + Identifier of the evaluated Log. The newly created Log will have this one set as parent. 
+
+        version_id : typing.Optional[str]
+            ID of the Evaluator version to log against.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Evaluator.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider. Only populated for LLM Evaluator Logs.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider. Only populated for LLM Evaluator Logs.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        create_evaluator_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the LLM. Only populated for LLM Evaluator Logs.
+
+        judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams]
+            Evaluator assessment of the Log.
+
+        marked_completed : typing.Optional[bool]
+            Whether the Log has been manually marked as completed by a user.
+
+        spec : typing.Optional[CreateEvaluatorLogRequestSpecParams]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[CreateEvaluatorLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "parent_id": parent_id, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": create_evaluator_log_request_environment, + "save": save, + "log_id": log_id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "judgment": convert_and_respect_annotation_metadata( + object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" + ), + "marked_completed": marked_completed, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateEvaluatorLogResponse, + construct_type( + type_=CreateEvaluatorLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[EvaluatorResponse]: + """ + Get a list of all Evaluators. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Evaluators to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Evaluator name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Evaluators by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
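+
+        Notes
+        -----
+        The async pager mirrors the sync one, except its `get_next` callback is
+        a coroutine. A consumption sketch (AsyncPager supports `async for`, as
+        shown in the wrapper client's example):
+
+            pager = await raw_client.list(size=50)
+            async for evaluator in pager:
+                print(evaluator.id)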
+
+        Returns
+        -------
+        AsyncPager[EvaluatorResponse]
+            Successful Response
+        """
+        page = page if page is not None else 1
+
+        _response = await self._client_wrapper.httpx_client.request(
+            "evaluators",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "name": name,
+                "user_filter": user_filter,
+                "sort_by": sort_by,
+                "order": order,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _parsed_response = typing.cast(
+                    PaginatedDataEvaluatorResponse,
+                    construct_type(
+                        type_=PaginatedDataEvaluatorResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                _items = _parsed_response.records
+                _has_next = True
+
+                async def _get_next():
+                    return await self.list(
+                        page=page + 1,
+                        size=size,
+                        name=name,
+                        user_filter=user_filter,
+                        sort_by=sort_by,
+                        order=order,
+                        request_options=request_options,
+                    )
+
+                return AsyncPager(
+                    has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def upsert(
+        self,
+        *,
+        spec: EvaluatorRequestSpecParams,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[EvaluatorResponse]:
+        """
+        Create an Evaluator or update it with a new version if it already exists.
+
+        Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Evaluator - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        spec : EvaluatorRequestSpecParams
+
+        path : typing.Optional[str]
+            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Evaluator.
+
+        version_name : typing.Optional[str]
+            Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators", + method="POST", + json={ + "path": path, + "id": id, + "version_name": version_name, + "version_description": version_description, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Retrieve the Evaluator with the given ID. + + By default, the deployed version of the Evaluator is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : typing.Optional[str] + A specific Version ID of the Evaluator to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Evaluator with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Move the Evaluator to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + path : typing.Optional[str] + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Evaluator, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListEvaluators]: + """ + Get a list of all the versions of an Evaluator. + + Parameters + ---------- + id : str + Unique identifier for the Evaluator. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
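+
+        Examples
+        --------
+        # Illustrative sketch only; assumes a valid API key and an existing
+        # Evaluator (the ID below is a placeholder).
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            versions = await client.evaluators.with_raw_response.list_versions(id='ev_890bcd', )
+            print(versions.data)
+        asyncio.run(main())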
+ + Returns + ------- + AsyncHttpResponse[ListEvaluators] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListEvaluators, + construct_type( + type_=ListEvaluators, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Update the name or description of the Evaluator version. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
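+
+        Examples
+        --------
+        # Illustrative sketch only; assumes a valid API key plus existing
+        # Evaluator and Version IDs (both placeholders here).
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.evaluators.with_raw_response.update_evaluator_version(id='ev_890bcd', version_id='evv_012def', name='improved-prompt', )
+        asyncio.run(main())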
+ + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Deploy Evaluator to an Environment. + + Set the deployed version for the specified Environment. This Evaluator + will be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Evaluator from the Environment. + + Remove the deployed version for the specified Environment. This Evaluator + will no longer be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. 
+ + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Activate and deactivate Evaluators for monitoring the Evaluator. + + An activated Evaluator will automatically be run on all new Logs + within the Evaluator for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. 
These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/evaluators/requests/__init__.py b/src/humanloop/evaluators/requests/__init__.py new file mode 100644 index 00000000..6a00390a --- /dev/null +++ b/src/humanloop/evaluators/requests/__init__.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams +from .create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams +from .evaluator_request_spec import EvaluatorRequestSpecParams + +__all__ = [ + "CreateEvaluatorLogRequestJudgmentParams", + "CreateEvaluatorLogRequestSpecParams", + "EvaluatorRequestSpecParams", +] diff --git a/src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py b/src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py new file mode 100644 index 00000000..680abe1f --- /dev/null +++ b/src/humanloop/evaluators/requests/create_evaluator_log_request_judgment.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateEvaluatorLogRequestJudgmentParams = typing.Union[bool, str, typing.Sequence[str], float] diff --git a/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py b/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py new file mode 100644 index 00000000..0e6539ed --- /dev/null +++ b/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. 
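+#
+# Note: `CreateEvaluatorLogRequestSpecParams` below is a plain `typing.Union`;
+# callers pass whichever params dict matches their Evaluator kind (LLM, code,
+# human, or external), and no runtime discrimination happens at this alias.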
+ +import typing + +from ...requests.code_evaluator_request import CodeEvaluatorRequestParams +from ...requests.external_evaluator_request import ExternalEvaluatorRequestParams +from ...requests.human_evaluator_request import HumanEvaluatorRequestParams +from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams + +CreateEvaluatorLogRequestSpecParams = typing.Union[ + LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams +] diff --git a/src/humanloop/evaluators/requests/evaluator_request_spec.py b/src/humanloop/evaluators/requests/evaluator_request_spec.py new file mode 100644 index 00000000..7bd0d395 --- /dev/null +++ b/src/humanloop/evaluators/requests/evaluator_request_spec.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.code_evaluator_request import CodeEvaluatorRequestParams +from ...requests.external_evaluator_request import ExternalEvaluatorRequestParams +from ...requests.human_evaluator_request import HumanEvaluatorRequestParams +from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams + +EvaluatorRequestSpecParams = typing.Union[ + LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams +] diff --git a/src/humanloop/evaluators/types/__init__.py b/src/humanloop/evaluators/types/__init__.py new file mode 100644 index 00000000..09e95d81 --- /dev/null +++ b/src/humanloop/evaluators/types/__init__.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgment +from .create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpec +from .evaluator_request_spec import EvaluatorRequestSpec + +__all__ = ["CreateEvaluatorLogRequestJudgment", "CreateEvaluatorLogRequestSpec", "EvaluatorRequestSpec"] diff --git a/src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py b/src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py new file mode 100644 index 00000000..d3098335 --- /dev/null +++ b/src/humanloop/evaluators/types/create_evaluator_log_request_judgment.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateEvaluatorLogRequestJudgment = typing.Union[bool, str, typing.List[str], float] diff --git a/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py b/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py new file mode 100644 index 00000000..0f22560c --- /dev/null +++ b/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. 
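+#
+# Note: this mirrors the `...Params` union in `evaluators/requests`, but is
+# built from the model types under `humanloop.types` rather than request params.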
+ +import typing + +from ...types.code_evaluator_request import CodeEvaluatorRequest +from ...types.external_evaluator_request import ExternalEvaluatorRequest +from ...types.human_evaluator_request import HumanEvaluatorRequest +from ...types.llm_evaluator_request import LlmEvaluatorRequest + +CreateEvaluatorLogRequestSpec = typing.Union[ + LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest +] diff --git a/src/humanloop/evaluators/types/evaluator_request_spec.py b/src/humanloop/evaluators/types/evaluator_request_spec.py new file mode 100644 index 00000000..3f31af3f --- /dev/null +++ b/src/humanloop/evaluators/types/evaluator_request_spec.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.code_evaluator_request import CodeEvaluatorRequest +from ...types.external_evaluator_request import ExternalEvaluatorRequest +from ...types.human_evaluator_request import HumanEvaluatorRequest +from ...types.llm_evaluator_request import LlmEvaluatorRequest + +EvaluatorRequestSpec = typing.Union[ + LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest +] diff --git a/src/humanloop/files/__init__.py b/src/humanloop/files/__init__.py new file mode 100644 index 00000000..7b3a69b5 --- /dev/null +++ b/src/humanloop/files/__init__.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .types import RetrieveByPathFilesRetrieveByPathPostResponse +from .requests import RetrieveByPathFilesRetrieveByPathPostResponseParams + +__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponse", "RetrieveByPathFilesRetrieveByPathPostResponseParams"] diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py new file mode 100644 index 00000000..407ba0e9 --- /dev/null +++ b/src/humanloop/files/client.py @@ -0,0 +1,301 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.request_options import RequestOptions +from ..types.file_sort_by import FileSortBy +from ..types.file_type import FileType +from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, +) +from ..types.sort_order import SortOrder +from .raw_client import AsyncRawFilesClient, RawFilesClient +from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class FilesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawFilesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawFilesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + RawFilesClient + """ + return self._raw_client + + def list_files( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + path: typing.Optional[str] = None, + template: typing.Optional[bool] = None, + type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, + environment: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: + """ + Get a paginated list of files. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of files to fetch. + + name : typing.Optional[str] + Case-insensitive filter for file name. + + path : typing.Optional[str] + Path of the directory to filter for. Returns files in this directory and all its subdirectories. + + template : typing.Optional[bool] + Filter to include only template files. + + type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] + List of file types to filter for. + + environment : typing.Optional[str] + Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. + + sort_by : typing.Optional[FileSortBy] + Field to sort files by + + order : typing.Optional[SortOrder] + Direction to sort by. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.files.list_files() + """ + _response = self._raw_client.list_files( + page=page, + size=size, + name=name, + path=path, + template=template, + type=type, + environment=environment, + sort_by=sort_by, + order=order, + include_raw_file_content=include_raw_file_content, + request_options=request_options, + ) + return _response.data + + def retrieve_by_path( + self, + *, + path: str, + environment: typing.Optional[str] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RetrieveByPathFilesRetrieveByPathPostResponse: + """ + Retrieve a File by path. + + Parameters + ---------- + path : str + Path of the File to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + RetrieveByPathFilesRetrieveByPathPostResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.files.retrieve_by_path(path='path', ) + """ + _response = self._raw_client.retrieve_by_path( + path=path, + environment=environment, + include_raw_file_content=include_raw_file_content, + request_options=request_options, + ) + return _response.data + + +class AsyncFilesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawFilesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawFilesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawFilesClient + """ + return self._raw_client + + async def list_files( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + path: typing.Optional[str] = None, + template: typing.Optional[bool] = None, + type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, + environment: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: + """ + Get a paginated list of files. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of files to fetch. + + name : typing.Optional[str] + Case-insensitive filter for file name. + + path : typing.Optional[str] + Path of the directory to filter for. Returns files in this directory and all its subdirectories. + + template : typing.Optional[bool] + Filter to include only template files. + + type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] + List of file types to filter for. + + environment : typing.Optional[str] + Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. + + sort_by : typing.Optional[FileSortBy] + Field to sort files by + + order : typing.Optional[SortOrder] + Direction to sort by. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.files.list_files() + asyncio.run(main()) + """ + _response = await self._raw_client.list_files( + page=page, + size=size, + name=name, + path=path, + template=template, + type=type, + environment=environment, + sort_by=sort_by, + order=order, + include_raw_file_content=include_raw_file_content, + request_options=request_options, + ) + return _response.data + + async def retrieve_by_path( + self, + *, + path: str, + environment: typing.Optional[str] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RetrieveByPathFilesRetrieveByPathPostResponse: + """ + Retrieve a File by path. + + Parameters + ---------- + path : str + Path of the File to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + RetrieveByPathFilesRetrieveByPathPostResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.files.retrieve_by_path(path='path', ) + asyncio.run(main()) + """ + _response = await self._raw_client.retrieve_by_path( + path=path, + environment=environment, + include_raw_file_content=include_raw_file_content, + request_options=request_options, + ) + return _response.data diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py new file mode 100644 index 00000000..2f5f2d05 --- /dev/null +++ b/src/humanloop/files/raw_client.py @@ -0,0 +1,382 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.request_options import RequestOptions +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.file_sort_by import FileSortBy +from ..types.file_type import FileType +from ..types.http_validation_error import HttpValidationError +from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, +) +from ..types.sort_order import SortOrder +from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
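+# Note: the `...` sentinel lets the request serializer distinguish "argument
+# not given" from an explicit `None`, so omitted optional fields are dropped
+# from the JSON body instead of being sent as null.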
+ + +class RawFilesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list_files( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + path: typing.Optional[str] = None, + template: typing.Optional[bool] = None, + type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, + environment: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse + ]: + """ + Get a paginated list of files. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of files to fetch. + + name : typing.Optional[str] + Case-insensitive filter for file name. + + path : typing.Optional[str] + Path of the directory to filter for. Returns files in this directory and all its subdirectories. + + template : typing.Optional[bool] + Filter to include only template files. + + type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] + List of file types to filter for. + + environment : typing.Optional[str] + Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. + + sort_by : typing.Optional[FileSortBy] + Field to sort files by + + order : typing.Optional[SortOrder] + Direction to sort by. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "files", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "path": path, + "template": template, + "type": type, + "environment": environment, + "sort_by": sort_by, + "order": order, + "include_raw_file_content": include_raw_file_content, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, + construct_type( + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def retrieve_by_path( + self, + *, + path: str, + environment: typing.Optional[str] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]: + """ + Retrieve a File by path. + + Parameters + ---------- + path : str + Path of the File to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
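+
+        Examples
+        --------
+        # Illustrative sketch only; assumes a valid API key and an existing
+        # File at the given path (the path below is a placeholder).
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.files.with_raw_response.retrieve_by_path(path='folder/my-prompt', )
+        print(response.data)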
+ + Returns + ------- + HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "files/retrieve-by-path", + method="POST", + params={ + "environment": environment, + "include_raw_file_content": include_raw_file_content, + }, + json={ + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + RetrieveByPathFilesRetrieveByPathPostResponse, + construct_type( + type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawFilesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list_files( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + path: typing.Optional[str] = None, + template: typing.Optional[bool] = None, + type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, + environment: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse + ]: + """ + Get a paginated list of files. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of files to fetch. + + name : typing.Optional[str] + Case-insensitive filter for file name. + + path : typing.Optional[str] + Path of the directory to filter for. Returns files in this directory and all its subdirectories. + + template : typing.Optional[bool] + Filter to include only template files. + + type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] + List of file types to filter for. + + environment : typing.Optional[str] + Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. + + sort_by : typing.Optional[FileSortBy] + Field to sort files by + + order : typing.Optional[SortOrder] + Direction to sort by. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "files", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "path": path, + "template": template, + "type": type, + "environment": environment, + "sort_by": sort_by, + "order": order, + "include_raw_file_content": include_raw_file_content, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, + construct_type( + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def retrieve_by_path( + self, + *, + path: str, + environment: typing.Optional[str] = None, + include_raw_file_content: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]: + """ + Retrieve a File by path. + + Parameters + ---------- + path : str + Path of the File to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_raw_file_content : typing.Optional[bool] + Whether to include the raw file content in the response. Currently only supported for Agents and Prompts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "files/retrieve-by-path", + method="POST", + params={ + "environment": environment, + "include_raw_file_content": include_raw_file_content, + }, + json={ + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + RetrieveByPathFilesRetrieveByPathPostResponse, + construct_type( + type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/files/requests/__init__.py b/src/humanloop/files/requests/__init__.py new file mode 100644 index 00000000..c4ae6bb0 --- /dev/null +++ b/src/humanloop/files/requests/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponseParams + +__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponseParams"] diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py new file mode 100644 index 00000000..20c1bef0 --- /dev/null +++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.agent_response import AgentResponseParams +from ...requests.dataset_response import DatasetResponseParams +from ...requests.evaluator_response import EvaluatorResponseParams +from ...requests.flow_response import FlowResponseParams +from ...requests.prompt_response import PromptResponseParams +from ...requests.tool_response import ToolResponseParams + +RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[ + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, +] diff --git a/src/humanloop/files/types/__init__.py b/src/humanloop/files/types/__init__.py new file mode 100644 index 00000000..c34673a3 --- /dev/null +++ b/src/humanloop/files/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +# isort: skip_file + +from .retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse + +__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponse"] diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py new file mode 100644 index 00000000..c3dd6cb7 --- /dev/null +++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.agent_response import AgentResponse +from ...types.dataset_response import DatasetResponse +from ...types.evaluator_response import EvaluatorResponse +from ...types.flow_response import FlowResponse +from ...types.prompt_response import PromptResponse +from ...types.tool_response import ToolResponse + +RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[ + PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse +] diff --git a/src/humanloop/flows/__init__.py b/src/humanloop/flows/__init__.py new file mode 100644 index 00000000..5cde0202 --- /dev/null +++ b/src/humanloop/flows/__init__.py @@ -0,0 +1,4 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py new file mode 100644 index 00000000..8fae2360 --- /dev/null +++ b/src/humanloop/flows/client.py @@ -0,0 +1,1583 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..requests.chat_message import ChatMessageParams +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..requests.flow_kernel_request import FlowKernelRequestParams +from ..types.create_flow_log_response import CreateFlowLogResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.flow_log_response import FlowLogResponse +from ..types.flow_response import FlowResponse +from ..types.list_flows import ListFlows +from ..types.log_status import LogStatus +from ..types.sort_order import SortOrder +from .raw_client import AsyncRawFlowsClient, RawFlowsClient + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class FlowsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawFlowsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawFlowsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+
+        Returns
+        -------
+        RawFlowsClient
+        """
+        return self._raw_client
+
+    def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        run_id: typing.Optional[str] = OMIT,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        flow_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        flow: typing.Optional[FlowKernelRequestParams] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CreateFlowLogResponse:
+        """
+        Log to a Flow.
+
+        You can use query parameters `version_id` or `environment` to target
+        an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+        If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+        in order to trigger Evaluators.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Flow to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        start_time : typing.Optional[dt.datetime]
+            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+        end_time : typing.Optional[dt.datetime]
+            The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to the provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Flow.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        flow_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        flow : typing.Optional[FlowKernelRequestParams]
+            Flow used to generate the Trace.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CreateFlowLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        import datetime
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8}
+        , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n pass\n'}
+        }}, inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'
+        }, output='The patient is likely experiencing a myocardial infarction.
Immediate medical attention is required.', log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat("2024-07-08 21:40:39+00:00", ), ) + """ + _response = self._raw_client.log( + version_id=version_id, + environment=environment, + messages=messages, + output_message=output_message, + run_id=run_id, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + flow_log_request_environment=flow_log_request_environment, + save=save, + log_id=log_id, + flow=flow, + request_options=request_options, + ) + return _response.data + + def update_log( + self, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowLogResponse: + """ + Update the status, inputs, output of a Flow Log. + + Marking a Flow Log as complete will trigger any monitoring Evaluators to run. + Inputs and output (or error) must be provided in order to mark it as complete. + + The end_time log attribute will be set to match the time the log is marked as complete. + + Parameters + ---------- + log_id : str + Unique identifier of the Flow Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Flow. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Flow. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Flow Log. + + output : typing.Optional[str] + The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowLogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.' + }, output='The patient is likely experiencing a myocardial infarction. 
Immediate medical attention is required.', log_status="complete", ) + """ + _response = self._raw_client.update_log( + log_id, + messages=messages, + output_message=output_message, + inputs=inputs, + output=output, + error=error, + log_status=log_status, + request_options=request_options, + ) + return _response.data + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: + """ + Retrieve the Flow with the given ID. + + By default, the deployed version of the Flow is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : typing.Optional[str] + A specific Version ID of the Flow to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', ) + """ + _response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Flow with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: + """ + Move the Flow to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + path : typing.Optional[str] + Path of the Flow including the Flow name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Flow. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move Flow to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
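+
+        A minimal sketch of moving a Flow into a Directory by its ID rather
+        than by path (the `dir_` value below is a placeholder, not a real ID):
+
+            from humanloop import Humanloop
+            client = Humanloop(api_key="YOUR_API_KEY")
+            client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', directory_id='dir_XXXXXXXXXXXX')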
+
+        Returns
+        -------
+        FlowResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
+        """
+        _response = self._raw_client.move(
+            id, path=path, name=name, directory_id=directory_id, request_options=request_options
+        )
+        return _response.data
+
+    def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[FileSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SyncPager[FlowResponse]:
+        """
+        Get a list of Flows.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Flows to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Flow name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Flows by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SyncPager[FlowResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.flows.list(size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+        """
+        return self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    def upsert(
+        self,
+        *,
+        attributes: typing.Dict[str, typing.Optional[typing.Any]],
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FlowResponse:
+        """
+        Create or update a Flow.
+
+        Flows can also be identified by the `ID` or their `path`.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Flow; attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        attributes : typing.Dict[str, typing.Optional[typing.Any]]
+            A key-value object identifying the Flow Version.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        version_name : typing.Optional[str]
+            Unique name for the Flow version. Version names must be unique for a given Flow.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
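+
+        A sketch of how named versions behave; the path, attribute values, and
+        version names below are illustrative only. Re-using an existing
+        `version_name` for different `attributes` results in a 409 Conflict
+        error, so each new version should get a fresh name:
+
+            from humanloop import Humanloop
+            client = Humanloop(api_key="YOUR_API_KEY")
+            client.flows.upsert(path='demo/flow', attributes={'variant': 'a'}, version_name='v1')
+            client.flows.upsert(path='demo/flow', attributes={'variant': 'b'}, version_name='v2')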
+
+        Returns
+        -------
+        FlowResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.upsert(path='Personal Projects/MedQA Flow', attributes={'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8}
+        , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
+        }, version_name='medqa-flow-v1', version_description='Initial version', )
+        """
+        _response = self._raw_client.upsert(
+            attributes=attributes,
+            path=path,
+            id=id,
+            version_name=version_name,
+            version_description=version_description,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def list_versions(
+        self,
+        id: str,
+        *,
+        evaluator_aggregates: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ListFlows:
+        """
+        Get a list of all the versions of a Flow.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Flow.
+
+        evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ListFlows
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
+        """
+        _response = self._raw_client.list_versions(
+            id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+        )
+        return _response.data
+
+    def delete_flow_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Delete a version of the Flow.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Flow.
+
+        version_id : str
+            Unique identifier for the specific version of the Flow.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.delete_flow_version(id='id', version_id='version_id', )
+        """
+        _response = self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
+        return _response.data
+
+    def update_flow_version(
+        self,
+        id: str,
+        version_id: str,
+        *,
+        name: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FlowResponse:
+        """
+        Update the name or description of the Flow version.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Flow.
+
+        version_id : str
+            Unique identifier for the specific version of the Flow.
+
+        name : typing.Optional[str]
+            Name of the version.
+
+        description : typing.Optional[str]
+            Description of the version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
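+
+        A minimal sketch (the IDs are placeholders); since both fields are
+        optional, you would typically pass only the one you want to change:
+
+            from humanloop import Humanloop
+            client = Humanloop(api_key="YOUR_API_KEY")
+            client.flows.update_flow_version(id='fl_XXXXXXXX', version_id='flv_XXXXXXXX', name='v1-renamed')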
+ + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.update_flow_version(id='id', version_id='version_id', ) + """ + _response = self._raw_client.update_flow_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> FlowResponse: + """ + Deploy Flow to an Environment. + + Set the deployed version for the specified Environment. This Flow + will be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', ) + """ + _response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Flow from the Environment. + + Remove the deployed version for the specified Environment. This Flow + will no longer be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', ) + """ + _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
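+
+        A short sketch of auditing where a Flow is deployed (the ID is a
+        placeholder; the fields available on each item are those defined by
+        `FileEnvironmentResponse`):
+
+            from humanloop import Humanloop
+            client = Humanloop(api_key="YOUR_API_KEY")
+            for env in client.flows.list_environments(id='fl_XXXXXXXX'):
+                print(env)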
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', ) + """ + _response = self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: + """ + Activate and deactivate Evaluators for monitoring the Flow. + + An activated Evaluator will automatically be run on all new "completed" Logs + within the Flow for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) + """ + _response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + +class AsyncFlowsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawFlowsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawFlowsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+
+        Returns
+        -------
+        AsyncRawFlowsClient
+        """
+        return self._raw_client
+
+    async def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        run_id: typing.Optional[str] = OMIT,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        flow_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        flow: typing.Optional[FlowKernelRequestParams] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CreateFlowLogResponse:
+        """
+        Log to a Flow.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+        If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+        in order to trigger Evaluators.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Flow to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        start_time : typing.Optional[dt.datetime]
+            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+        end_time : typing.Optional[dt.datetime]
+            The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Flow.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        flow_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        flow : typing.Optional[FlowKernelRequestParams]
+            Flow used to generate the Trace.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CreateFlowLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import datetime
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8}
+            , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
+            }}, inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'
+            }, output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.', log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat("2024-07-08 21:40:39+00:00", ), )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.log(
+            version_id=version_id,
+            environment=environment,
+            messages=messages,
+            output_message=output_message,
+            run_id=run_id,
+            path=path,
+            id=id,
+            start_time=start_time,
+            end_time=end_time,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            flow_log_request_environment=flow_log_request_environment,
+            save=save,
+            log_id=log_id,
+            flow=flow,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def update_log(
+        self,
+        log_id: str,
+        *,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FlowLogResponse:
+        """
+        Update the status, inputs, and output of a Flow Log.
+
+        Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+        Inputs and output (or error) must be provided in order to mark it as complete.
+
+        The `end_time` log attribute will be set to match the time the log is marked as complete.
+
+        Parameters
+        ----------
+        log_id : str
+            Unique identifier of the Flow Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Flow Log.
+
+        output : typing.Optional[str]
+            The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        FlowLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.'
+            }, output='The patient is likely experiencing a myocardial infarction. 
Immediate medical attention is required.', log_status="complete", ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_log( + log_id, + messages=messages, + output_message=output_message, + inputs=inputs, + output=output, + error=error, + log_status=log_status, + request_options=request_options, + ) + return _response.data + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: + """ + Retrieve the Flow with the given ID. + + By default, the deployed version of the Flow is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : typing.Optional[str] + A specific Version ID of the Flow to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Flow with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: + """ + Move the Flow to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + path : typing.Optional[str] + Path of the Flow including the Flow name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Flow. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move Flow to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        FlowResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.move(
+            id, path=path, name=name, directory_id=directory_id, request_options=request_options
+        )
+        return _response.data
+
+    async def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[FileSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncPager[FlowResponse]:
+        """
+        Get a list of Flows.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Flows to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Flow name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Flows by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncPager[FlowResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.flows.list(size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    async def upsert(
+        self,
+        *,
+        attributes: typing.Dict[str, typing.Optional[typing.Any]],
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FlowResponse:
+        """
+        Create or update a Flow.
+
+        Flows can also be identified by the `ID` or their `path`.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Flow; attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        attributes : typing.Dict[str, typing.Optional[typing.Any]]
+            A key-value object identifying the Flow Version.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        version_name : typing.Optional[str]
+            Unique name for the Flow version. Version names must be unique for a given Flow.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        FlowResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.flows.upsert(path='Personal Projects/MedQA Flow', attributes={'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8}
+            , 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}
+            }, version_name='medqa-flow-v1', version_description='Initial version', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.upsert(
+            attributes=attributes,
+            path=path,
+            id=id,
+            version_name=version_name,
+            version_description=version_description,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def list_versions(
+        self,
+        id: str,
+        *,
+        evaluator_aggregates: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ListFlows:
+        """
+        Get a list of all the versions of a Flow.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Flow.
+
+        evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ListFlows
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.list_versions(
+            id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+        )
+        return _response.data
+
+    async def delete_flow_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Delete a version of the Flow.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Flow.
+
+        version_id : str
+            Unique identifier for the specific version of the Flow.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.flows.delete_flow_version(id='id', version_id='version_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
+        return _response.data
+
+    async def update_flow_version(
+        self,
+        id: str,
+        version_id: str,
+        *,
+        name: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FlowResponse:
+        """
+        Update the name or description of the Flow version.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Flow.
+
+        version_id : str
+            Unique identifier for the specific version of the Flow.
+
+        name : typing.Optional[str]
+            Name of the version.
+
+        description : typing.Optional[str]
+            Description of the version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.update_flow_version(id='id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_flow_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> FlowResponse: + """ + Deploy Flow to an Environment. + + Set the deployed version for the specified Environment. This Flow + will be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', ) + asyncio.run(main()) + """ + _response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Flow from the Environment. + + Remove the deployed version for the specified Environment. This Flow + will no longer be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
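+
+        Because this client is async, independent calls can be issued
+        concurrently; a sketch using `asyncio.gather` (the IDs are placeholders):
+
+            import asyncio
+            from humanloop import AsyncHumanloop
+            client = AsyncHumanloop(api_key="YOUR_API_KEY")
+            async def main() -> None:
+                envs_a, envs_b = await asyncio.gather(
+                    client.flows.list_environments(id='fl_aaaaaaaa'),
+                    client.flows.list_environments(id='fl_bbbbbbbb'),
+                )
+            asyncio.run(main())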
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: + """ + Activate and deactivate Evaluators for monitoring the Flow. + + An activated Evaluator will automatically be run on all new "completed" Logs + within the Flow for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + FlowResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data diff --git a/src/humanloop/flows/raw_client.py b/src/humanloop/flows/raw_client.py new file mode 100644 index 00000000..e3954572 --- /dev/null +++ b/src/humanloop/flows/raw_client.py @@ -0,0 +1,2217 @@ +# This file was auto-generated by Fern from our API Definition. 
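+#
+# The raw client below returns HttpResponse[...] wrappers instead of the bare
+# response models; the FlowsClient methods above delegate here and unwrap the
+# result via `_response.data`. A sketch of reaching the raw response through
+# the wrapped client (the ID is a placeholder):
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     raw_response = client.flows.with_raw_response.get(id='fl_XXXXXXXX')
+#     flow = raw_response.data  # the parsed FlowResponse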
+
+import datetime as dt
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
+from ..core.request_options import RequestOptions
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..requests.chat_message import ChatMessageParams
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+    EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+    EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..requests.flow_kernel_request import FlowKernelRequestParams
+from ..types.create_flow_log_response import CreateFlowLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.flow_log_response import FlowLogResponse
+from ..types.flow_response import FlowResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.list_flows import ListFlows
+from ..types.log_status import LogStatus
+from ..types.paginated_data_flow_response import PaginatedDataFlowResponse
+from ..types.sort_order import SortOrder
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawFlowsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        run_id: typing.Optional[str] = OMIT,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        flow_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        flow: typing.Optional[FlowKernelRequestParams] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[CreateFlowLogResponse]:
+        """
+        Log to a Flow.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+        If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+        in order to trigger Evaluators.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Flow to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        start_time : typing.Optional[dt.datetime]
+            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+        end_time : typing.Optional[dt.datetime]
+            The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Flow.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        flow_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        flow : typing.Optional[FlowKernelRequestParams]
+            Flow used to generate the Trace.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[CreateFlowLogResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "flows/log",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "output_message": convert_and_respect_annotation_metadata(
+                    object_=output_message, annotation=ChatMessageParams, direction="write"
+                ),
+                "run_id": run_id,
+                "path": path,
+                "id": id,
+                "start_time": start_time,
+                "end_time": end_time,
+                "output": output,
+                "created_at": created_at,
+                "error": error,
+                "provider_latency": provider_latency,
+                "stdout": stdout,
+                "provider_request": provider_request,
+                "provider_response": provider_response,
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": flow_log_request_environment,
+                "save": save,
+                "log_id": log_id,
+                "flow": convert_and_respect_annotation_metadata(
+                    object_=flow, annotation=FlowKernelRequestParams, direction="write"
+                ),
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    CreateFlowLogResponse,
+                    construct_type(
+                        type_=CreateFlowLogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    def update_log(
+        self,
+        log_id: str,
+        *,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[FlowLogResponse]:
+        """
+        Update the status, inputs, and output of a Flow Log.
+
+        Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+        Inputs and output (or error) must be provided in order to mark it as complete.
+
+        The `end_time` log attribute will be set to match the time the log is marked as complete.
+
+        Parameters
+        ----------
+        log_id : str
+            Unique identifier of the Flow Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Flow Log.
+ + output : typing.Optional[str] + The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[FlowLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/logs/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowLogResponse, + construct_type( + type_=FlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Retrieve the Flow with the given ID. + + By default, the deployed version of the Flow is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : typing.Optional[str] + A specific Version ID of the Flow to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
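+
+        Unlike `FlowsClient.get`, which unwraps the payload, this returns the
+        parsed model wrapped in an `HttpResponse`; a minimal sketch (the ID is
+        a placeholder):
+
+            response = client.flows.with_raw_response.get(id='fl_XXXXXXXX')
+            flow = response.data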
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Flow with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Move the Flow to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + path : typing.Optional[str] + Path of the Flow including the Flow name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Flow. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move Flow to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + "directory_id": directory_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[FlowResponse]: + """ + Get a list of Flows. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Flows to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Flow name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Flows by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
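+
+        Examples
+        --------
+        A minimal sketch of iterating the pager, mirroring the pattern used in the
+        Logs client examples; the filter values are illustrative:
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.flows.list(size=10, name='chatbot', )
+        for flow in response:
+            print(flow)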
+
+        Returns
+        -------
+        SyncPager[FlowResponse]
+            Successful Response
+        """
+        page = page if page is not None else 1
+
+        _response = self._client_wrapper.httpx_client.request(
+            "flows",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "name": name,
+                "user_filter": user_filter,
+                "sort_by": sort_by,
+                "order": order,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _parsed_response = typing.cast(
+                    PaginatedDataFlowResponse,
+                    construct_type(
+                        type_=PaginatedDataFlowResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                _items = _parsed_response.records
+                _has_next = True
+                _get_next = lambda: self.list(
+                    page=page + 1,
+                    size=size,
+                    name=name,
+                    user_filter=user_filter,
+                    sort_by=sort_by,
+                    order=order,
+                    request_options=request_options,
+                )
+                return SyncPager(
+                    has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    def upsert(
+        self,
+        *,
+        attributes: typing.Dict[str, typing.Optional[typing.Any]],
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[FlowResponse]:
+        """
+        Create or update a Flow.
+
+        Flows can also be identified by the `ID` or their `path`.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Flow - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        attributes : typing.Dict[str, typing.Optional[typing.Any]]
+            A key-value object identifying the Flow Version.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        version_name : typing.Optional[str]
+            Unique name for the Flow version. Version names must be unique for a given Flow.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
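+
+        Examples
+        --------
+        A minimal sketch of creating (or re-using) a Flow version keyed by its
+        `attributes`; the path, attribute values, and version name are illustrative:
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.upsert(path='folder/my-flow', attributes={'version': '1'}, version_name='v1', )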
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "flows", + method="POST", + json={ + "path": path, + "id": id, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListFlows]: + """ + Get a list of all the versions of a Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListFlows] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListFlows, + construct_type( + type_=ListFlows, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_flow_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
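+
+        Examples
+        --------
+        A minimal sketch of deleting a single version of a Flow; both identifiers are
+        illustrative placeholders:
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.delete_flow_version(id='fl_1234abcd', version_id='flv_5678efgh', )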
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_flow_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Update the name or description of the Flow version. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[FlowResponse]: + """ + Deploy Flow to an Environment. + + Set the deployed version for the specified Environment. This Flow + will be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
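+
+        Examples
+        --------
+        A minimal sketch of deploying a specific Flow version to an Environment; all
+        three identifiers are illustrative placeholders:
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.set_deployment(id='fl_1234abcd', environment_id='env_1234abcd', version_id='flv_5678efgh', )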
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Flow from the Environment. + + Remove the deployed version for the specified Environment. This Flow + will no longer be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
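+
+        Examples
+        --------
+        A minimal sketch of checking which version of a Flow is deployed to each
+        Environment; the Flow ID is an illustrative placeholder:
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        for deployment in client.flows.list_environments(id='fl_1234abcd', ):
+            print(deployment)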
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Activate and deactivate Evaluators for monitoring the Flow. + + An activated Evaluator will automatically be run on all new "completed" Logs + within the Flow for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
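+
+        Examples
+        --------
+        A minimal sketch of activating a monitoring Evaluator on a Flow. The Evaluator
+        version ID and the shape of the activation item are illustrative assumptions,
+        not a confirmed request schema:
+
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.update_monitoring(id='fl_1234abcd', activate=[{'evaluator_version_id': 'evv_1234abcd'}], )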
+
+        Returns
+        -------
+        HttpResponse[FlowResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"flows/{jsonable_encoder(id)}/evaluators",
+            method="POST",
+            json={
+                "activate": convert_and_respect_annotation_metadata(
+                    object_=activate,
+                    annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+                    direction="write",
+                ),
+                "deactivate": convert_and_respect_annotation_metadata(
+                    object_=deactivate,
+                    annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+                    direction="write",
+                ),
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    FlowResponse,
+                    construct_type(
+                        type_=FlowResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+class AsyncRawFlowsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        run_id: typing.Optional[str] = OMIT,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        flow_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        flow: typing.Optional[FlowKernelRequestParams] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[CreateFlowLogResponse]:
+        """
+        Log to a Flow.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+        If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+        in order to trigger Evaluators.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Flow to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        start_time : typing.Optional[dt.datetime]
+            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+        end_time : typing.Optional[dt.datetime]
+            The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Flow.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        flow_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        flow : typing.Optional[FlowKernelRequestParams]
+            Flow used to generate the Trace.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
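+
+        Examples
+        --------
+        A minimal async sketch of opening an `incomplete` trace Log so that child Logs
+        can be added later. The path, inputs, and the string literal for `log_status`
+        are illustrative assumptions:
+
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.flows.log(path='folder/my-flow', inputs={'question': 'hi'}, log_status='incomplete', )
+            print(response)
+        asyncio.run(main())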
+ + Returns + ------- + AsyncHttpResponse[CreateFlowLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "flows/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "run_id": run_id, + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": flow_log_request_environment, + "save": save, + "log_id": log_id, + "flow": convert_and_respect_annotation_metadata( + object_=flow, annotation=FlowKernelRequestParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateFlowLogResponse, + construct_type( + type_=CreateFlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_log( + self, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowLogResponse]: + """ + Update the status, inputs, output of a Flow Log. + + Marking a Flow Log as complete will trigger any monitoring Evaluators to run. + Inputs and output (or error) must be provided in order to mark it as complete. + + The end_time log attribute will be set to match the time the log is marked as complete. + + Parameters + ---------- + log_id : str + Unique identifier of the Flow Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Flow. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Flow. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Flow Log. + + output : typing.Optional[str] + The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. 
+ + error : typing.Optional[str] + The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[FlowLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/logs/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowLogResponse, + construct_type( + type_=FlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Retrieve the Flow with the given ID. + + By default, the deployed version of the Flow is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : typing.Optional[str] + A specific Version ID of the Flow to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Flow with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Move the Flow to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + path : typing.Optional[str] + Path of the Flow including the Flow name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Flow. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move Flow to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + "directory_id": directory_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[FlowResponse]: + """ + Get a list of Flows. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Flows to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Flow name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Flows by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
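+
+        Examples
+        --------
+        A minimal async sketch of iterating the Flow pager, mirroring the async Logs
+        example; the page size is illustrative:
+
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.flows.list(size=10, )
+            async for flow in response:
+                print(flow)
+        asyncio.run(main())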
+
+        Returns
+        -------
+        AsyncPager[FlowResponse]
+            Successful Response
+        """
+        page = page if page is not None else 1
+
+        _response = await self._client_wrapper.httpx_client.request(
+            "flows",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "name": name,
+                "user_filter": user_filter,
+                "sort_by": sort_by,
+                "order": order,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _parsed_response = typing.cast(
+                    PaginatedDataFlowResponse,
+                    construct_type(
+                        type_=PaginatedDataFlowResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                _items = _parsed_response.records
+                _has_next = True
+
+                async def _get_next():
+                    return await self.list(
+                        page=page + 1,
+                        size=size,
+                        name=name,
+                        user_filter=user_filter,
+                        sort_by=sort_by,
+                        order=order,
+                        request_options=request_options,
+                    )
+
+                return AsyncPager(
+                    has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def upsert(
+        self,
+        *,
+        attributes: typing.Dict[str, typing.Optional[typing.Any]],
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[FlowResponse]:
+        """
+        Create or update a Flow.
+
+        Flows can also be identified by the `ID` or their `path`.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Flow - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        attributes : typing.Dict[str, typing.Optional[typing.Any]]
+            A key-value object identifying the Flow Version.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        version_name : typing.Optional[str]
+            Unique name for the Flow version. Version names must be unique for a given Flow.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "flows", + method="POST", + json={ + "path": path, + "id": id, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListFlows]: + """ + Get a list of all the versions of a Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListFlows] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListFlows, + construct_type( + type_=ListFlows, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete_flow_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_flow_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Update the name or description of the Flow version. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[FlowResponse]: + """ + Deploy Flow to an Environment. + + Set the deployed version for the specified Environment. This Flow + will be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Flow from the Environment. + + Remove the deployed version for the specified Environment. This Flow + will no longer be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Activate and deactivate Evaluators for monitoring the Flow. + + An activated Evaluator will automatically be run on all new "completed" Logs + within the Flow for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/logs/__init__.py b/src/humanloop/logs/__init__.py new file mode 100644 index 00000000..5cde0202 --- /dev/null +++ b/src/humanloop/logs/__init__.py @@ -0,0 +1,4 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py new file mode 100644 index 00000000..278c97cf --- /dev/null +++ b/src/humanloop/logs/client.py @@ -0,0 +1,360 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..types.log_response import LogResponse +from .raw_client import AsyncRawLogsClient, RawLogsClient + + +class LogsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawLogsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawLogsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawLogsClient + """ + return self._raw_client + + def list( + self, + *, + file_id: str, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + version_id: typing.Optional[str] = None, + id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + search: typing.Optional[str] = None, + metadata_search: typing.Optional[str] = None, + start_date: typing.Optional[dt.datetime] = None, + end_date: typing.Optional[dt.datetime] = None, + include_parent: typing.Optional[bool] = None, + in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, + sample: typing.Optional[int] = None, + include_trace_children: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[LogResponse]: + """ + List all Logs for the given filter criteria. 
+
+        Parameters
+        ----------
+        file_id : str
+            Unique identifier for the File to list Logs for.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        version_id : typing.Optional[str]
+            If provided, only Logs belonging to the specified Version will be returned.
+
+        id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            If provided, returns Logs whose IDs contain any of the specified values as substrings.
+
+        search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their inputs and output will be returned.
+
+        metadata_search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their metadata will be returned.
+
+        start_date : typing.Optional[dt.datetime]
+            If provided, only Logs created after the specified date will be returned.
+
+        end_date : typing.Optional[dt.datetime]
+            If provided, only Logs created before the specified date will be returned.
+
+        include_parent : typing.Optional[bool]
+            If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
+
+        in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
+            If true, return only Logs that are associated with a Trace; if false, return only Logs that are not.
+
+        sample : typing.Optional[int]
+            If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SyncPager[LogResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.logs.list(file_id='file_123abc', size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+        """
+        return self._raw_client.list(
+            file_id=file_id,
+            page=page,
+            size=size,
+            version_id=version_id,
+            id=id,
+            search=search,
+            metadata_search=metadata_search,
+            start_date=start_date,
+            end_date=end_date,
+            include_parent=include_parent,
+            in_trace_filter=in_trace_filter,
+            sample=sample,
+            include_trace_children=include_trace_children,
+            request_options=request_options,
+        )
+
+    def delete(
+        self,
+        *,
+        id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> None:
+        """
+        Delete Logs with the given IDs.
+
+        Parameters
+        ----------
+        id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            Unique identifiers for the Logs to delete.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
+        """
+        _response = self._raw_client.delete(id=id, request_options=request_options)
+        return _response.data
+
+    def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse:
+        """
+        Retrieve the Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Log.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
+        """
+        _response = self._raw_client.get(id, request_options=request_options)
+        return _response.data
+
+
+class AsyncLogsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._raw_client = AsyncRawLogsClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> AsyncRawLogsClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        AsyncRawLogsClient
+        """
+        return self._raw_client
+
+    async def list(
+        self,
+        *,
+        file_id: str,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        version_id: typing.Optional[str] = None,
+        id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        search: typing.Optional[str] = None,
+        metadata_search: typing.Optional[str] = None,
+        start_date: typing.Optional[dt.datetime] = None,
+        end_date: typing.Optional[dt.datetime] = None,
+        include_parent: typing.Optional[bool] = None,
+        in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+        sample: typing.Optional[int] = None,
+        include_trace_children: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncPager[LogResponse]:
+        """
+        List all Logs for the given filter criteria.
+
+        Parameters
+        ----------
+        file_id : str
+            Unique identifier for the File to list Logs for.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        version_id : typing.Optional[str]
+            If provided, only Logs belonging to the specified Version will be returned.
+
+        id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            If provided, returns Logs whose IDs contain any of the specified values as substrings.
+
+        search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their inputs and output will be returned.
+
+        metadata_search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their metadata will be returned.
+
+        start_date : typing.Optional[dt.datetime]
+            If provided, only Logs created after the specified date will be returned.
+
+        end_date : typing.Optional[dt.datetime]
+            If provided, only Logs created before the specified date will be returned.
+
+        include_parent : typing.Optional[bool]
+            If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
+
+        in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
+            If true, return only Logs that are associated with a Trace; if false, return only Logs that are not.
+
+        sample : typing.Optional[int]
+            If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncPager[LogResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.logs.list(file_id='file_123abc', size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list(
+            file_id=file_id,
+            page=page,
+            size=size,
+            version_id=version_id,
+            id=id,
+            search=search,
+            metadata_search=metadata_search,
+            start_date=start_date,
+            end_date=end_date,
+            include_parent=include_parent,
+            in_trace_filter=in_trace_filter,
+            sample=sample,
+            include_trace_children=include_trace_children,
+            request_options=request_options,
+        )
+
+    async def delete(
+        self,
+        *,
+        id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> None:
+        """
+        Delete Logs with the given IDs.
+
+        Parameters
+        ----------
+        id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            Unique identifiers for the Logs to delete.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.delete(id=id, request_options=request_options)
+        return _response.data
+
+    async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse:
+        """
+        Retrieve the Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Log.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.get(id, request_options=request_options)
+        return _response.data
diff --git a/src/humanloop/logs/raw_client.py b/src/humanloop/logs/raw_client.py
new file mode 100644
index 00000000..e155be92
--- /dev/null
+++ b/src/humanloop/logs/raw_client.py
@@ -0,0 +1,501 @@
+# This file was auto-generated by Fern from our API Definition.
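+#
+# Editor's note (usage sketch, not generated code): the raw clients below
+# return HttpResponse/AsyncHttpResponse wrappers, and SyncPager/AsyncPager
+# for list endpoints, rather than bare models. The wrapped client's `list`
+# defers to `RawLogsClient.list`, which fetches one page and wires up
+# `get_next` so iteration transparently crosses page boundaries. A minimal
+# sketch, assuming a valid API key and an existing File ID:
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     pager = client.logs.list(file_id="file_123abc", size=50)
+#     for log in pager:  # subsequent pages are fetched on demand
+#         print(log.id)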
+
+import datetime as dt
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.datetime_utils import serialize_datetime
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
+from ..core.request_options import RequestOptions
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from ..types.log_response import LogResponse
+from ..types.paginated_data_log_response import PaginatedDataLogResponse
+
+
+class RawLogsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def list(
+        self,
+        *,
+        file_id: str,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        version_id: typing.Optional[str] = None,
+        id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        search: typing.Optional[str] = None,
+        metadata_search: typing.Optional[str] = None,
+        start_date: typing.Optional[dt.datetime] = None,
+        end_date: typing.Optional[dt.datetime] = None,
+        include_parent: typing.Optional[bool] = None,
+        in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+        sample: typing.Optional[int] = None,
+        include_trace_children: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SyncPager[LogResponse]:
+        """
+        List all Logs for the given filter criteria.
+
+        Parameters
+        ----------
+        file_id : str
+            Unique identifier for the File to list Logs for.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        version_id : typing.Optional[str]
+            If provided, only Logs belonging to the specified Version will be returned.
+
+        id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            If provided, returns Logs whose IDs contain any of the specified values as substrings.
+
+        search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their inputs and output will be returned.
+
+        metadata_search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their metadata will be returned.
+
+        start_date : typing.Optional[dt.datetime]
+            If provided, only Logs created after the specified date will be returned.
+
+        end_date : typing.Optional[dt.datetime]
+            If provided, only Logs created before the specified date will be returned.
+
+        include_parent : typing.Optional[bool]
+            If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
+
+        in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
+            If true, return Logs that are associated to a Trace; if false, return Logs that are not associated to a Trace.
+
+        sample : typing.Optional[int]
+            If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + SyncPager[LogResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + "logs", + method="GET", + params={ + "file_id": file_id, + "page": page, + "size": size, + "version_id": version_id, + "id": id, + "search": search, + "metadata_search": metadata_search, + "start_date": serialize_datetime(start_date) if start_date is not None else None, + "end_date": serialize_datetime(end_date) if end_date is not None else None, + "include_parent": include_parent, + "in_trace_filter": in_trace_filter, + "sample": sample, + "include_trace_children": include_trace_children, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataLogResponse, + construct_type( + type_=PaginatedDataLogResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list( + file_id=file_id, + page=page + 1, + size=size, + version_id=version_id, + id=id, + search=search, + metadata_search=metadata_search, + start_date=start_date, + end_date=end_date, + include_parent=include_parent, + in_trace_filter=in_trace_filter, + sample=sample, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete( + self, + *, + id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[None]: + """ + Delete Logs with the given IDs. + + Parameters + ---------- + id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Unique identifiers for the Logs to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + "logs", + method="DELETE", + params={ + "id": id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[LogResponse]: + """ + Retrieve the Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Log. 
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[LogResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"logs/{jsonable_encoder(id)}",
+            method="GET",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    LogResponse,
+                    construct_type(
+                        type_=LogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+class AsyncRawLogsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list(
+        self,
+        *,
+        file_id: str,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        version_id: typing.Optional[str] = None,
+        id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+        search: typing.Optional[str] = None,
+        metadata_search: typing.Optional[str] = None,
+        start_date: typing.Optional[dt.datetime] = None,
+        end_date: typing.Optional[dt.datetime] = None,
+        include_parent: typing.Optional[bool] = None,
+        in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+        sample: typing.Optional[int] = None,
+        include_trace_children: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncPager[LogResponse]:
+        """
+        List all Logs for the given filter criteria.
+
+        Parameters
+        ----------
+        file_id : str
+            Unique identifier for the File to list Logs for.
+
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Logs to fetch.
+
+        version_id : typing.Optional[str]
+            If provided, only Logs belonging to the specified Version will be returned.
+
+        id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+            If provided, returns Logs whose IDs contain any of the specified values as substrings.
+
+        search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their inputs and output will be returned.
+
+        metadata_search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their metadata will be returned.
+
+        start_date : typing.Optional[dt.datetime]
+            If provided, only Logs created after the specified date will be returned.
+
+        end_date : typing.Optional[dt.datetime]
+            If provided, only Logs created before the specified date will be returned.
+
+        include_parent : typing.Optional[bool]
+            If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
+
+        in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
+            If true, return Logs that are associated to a Trace; if false, return Logs that are not associated to a Trace.
+
+        sample : typing.Optional[int]
+            If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
+ + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncPager[LogResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + "logs", + method="GET", + params={ + "file_id": file_id, + "page": page, + "size": size, + "version_id": version_id, + "id": id, + "search": search, + "metadata_search": metadata_search, + "start_date": serialize_datetime(start_date) if start_date is not None else None, + "end_date": serialize_datetime(end_date) if end_date is not None else None, + "include_parent": include_parent, + "in_trace_filter": in_trace_filter, + "sample": sample, + "include_trace_children": include_trace_children, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataLogResponse, + construct_type( + type_=PaginatedDataLogResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list( + file_id=file_id, + page=page + 1, + size=size, + version_id=version_id, + id=id, + search=search, + metadata_search=metadata_search, + start_date=start_date, + end_date=end_date, + include_parent=include_parent, + in_trace_filter=in_trace_filter, + sample=sample, + include_trace_children=include_trace_children, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, + *, + id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[None]: + """ + Delete Logs with the given IDs. + + Parameters + ---------- + id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Unique identifiers for the Logs to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + "logs", + method="DELETE", + params={ + "id": id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[LogResponse]: + """ + Retrieve the Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Log. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[LogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"logs/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py new file mode 100644 index 00000000..dcff7e62 --- /dev/null +++ b/src/humanloop/prompts/__init__.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. 
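+#
+# Editor's note (sketch, not generated code): this package re-exports two
+# parallel sets of names: the `types` variants are the validated models,
+# while the `*Params` variants from `requests` are the loose, dict-style
+# shapes accepted by client methods, so plain dicts work directly. A minimal
+# sketch of the distinction, assuming a valid API key (the path below is a
+# hypothetical example):
+#
+#     from humanloop import Humanloop
+#
+#     client = Humanloop(api_key="YOUR_API_KEY")
+#     client.prompts.upsert(
+#         path="demo/assistant",  # hypothetical Prompt path
+#         model="gpt-4o",
+#         # `template` accepts the plain-dict *Params shape directly:
+#         template=[{"role": "system", "content": "You are a helpful assistant."}],
+#     )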
+ +# isort: skip_file + +from .types import ( + PromptLogRequestPrompt, + PromptLogRequestToolChoice, + PromptLogUpdateRequestToolChoice, + PromptRequestReasoningEffort, + PromptRequestStop, + PromptRequestTemplate, + PromptsCallRequestPrompt, + PromptsCallRequestToolChoice, + PromptsCallStreamRequestPrompt, + PromptsCallStreamRequestToolChoice, +) +from .requests import ( + PromptLogRequestPromptParams, + PromptLogRequestToolChoiceParams, + PromptLogUpdateRequestToolChoiceParams, + PromptRequestReasoningEffortParams, + PromptRequestStopParams, + PromptRequestTemplateParams, + PromptsCallRequestPromptParams, + PromptsCallRequestToolChoiceParams, + PromptsCallStreamRequestPromptParams, + PromptsCallStreamRequestToolChoiceParams, +) + +__all__ = [ + "PromptLogRequestPrompt", + "PromptLogRequestPromptParams", + "PromptLogRequestToolChoice", + "PromptLogRequestToolChoiceParams", + "PromptLogUpdateRequestToolChoice", + "PromptLogUpdateRequestToolChoiceParams", + "PromptRequestReasoningEffort", + "PromptRequestReasoningEffortParams", + "PromptRequestStop", + "PromptRequestStopParams", + "PromptRequestTemplate", + "PromptRequestTemplateParams", + "PromptsCallRequestPrompt", + "PromptsCallRequestPromptParams", + "PromptsCallRequestToolChoice", + "PromptsCallRequestToolChoiceParams", + "PromptsCallStreamRequestPrompt", + "PromptsCallStreamRequestPromptParams", + "PromptsCallStreamRequestToolChoice", + "PromptsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py new file mode 100644 index 00000000..cd772a17 --- /dev/null +++ b/src/humanloop/prompts/client.py @@ -0,0 +1,2990 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..requests.chat_message import ChatMessageParams +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..requests.response_format import ResponseFormatParams +from ..requests.tool_function import ToolFunctionParams +from ..types.create_prompt_log_response import CreatePromptLogResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.list_prompts import ListPrompts +from ..types.log_response import LogResponse +from ..types.log_status import LogStatus +from ..types.model_endpoints import ModelEndpoints +from ..types.model_providers import ModelProviders +from ..types.populate_template_response import PopulateTemplateResponse +from ..types.prompt_call_response import PromptCallResponse +from ..types.prompt_call_stream_response import PromptCallStreamResponse +from ..types.prompt_kernel_request import PromptKernelRequest +from ..types.prompt_response import PromptResponse +from ..types.sort_order import SortOrder +from ..types.template_language import TemplateLanguage +from .raw_client import AsyncRawPromptsClient, RawPromptsClient +from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams +from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams 
+from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams +from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams +from .requests.prompt_request_stop import PromptRequestStopParams +from .requests.prompt_request_template import PromptRequestTemplateParams +from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams +from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams +from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams +from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class PromptsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawPromptsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawPromptsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawPromptsClient + """ + return self._raw_client + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompt_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreatePromptLogResponse: + """ + Log to a Prompt. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. 
This is helpful
+        in the case where you are storing or deriving your Prompt details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated to the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated to the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
+
+        prompt : typing.Optional[PromptLogRequestPromptParams]
+            The Prompt configuration to use. Two formats are supported:
+            - An object representing the details of the Prompt configuration
+            - A string representing the raw contents of a .prompt file
+            A new Prompt version will be created if the provided details do not match any existing version.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+ + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompt_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreatePromptLogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + import datetime + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump' + }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. 
Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
+        """
+        _response = self._raw_client.log(
+            version_id=version_id,
+            environment=environment,
+            run_id=run_id,
+            path=path,
+            id=id,
+            output_message=output_message,
+            prompt_tokens=prompt_tokens,
+            reasoning_tokens=reasoning_tokens,
+            output_tokens=output_tokens,
+            prompt_cost=prompt_cost,
+            output_cost=output_cost,
+            finish_reason=finish_reason,
+            messages=messages,
+            tool_choice=tool_choice,
+            prompt=prompt,
+            start_time=start_time,
+            end_time=end_time,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            prompt_log_request_environment=prompt_log_request_environment,
+            save=save,
+            log_id=log_id,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def update_log(
+        self,
+        id: str,
+        log_id: str,
+        *,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        prompt_tokens: typing.Optional[int] = OMIT,
+        reasoning_tokens: typing.Optional[int] = OMIT,
+        output_tokens: typing.Optional[int] = OMIT,
+        prompt_cost: typing.Optional[float] = OMIT,
+        output_cost: typing.Optional[float] = OMIT,
+        finish_reason: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LogResponse:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Prompt.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated to the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated to the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.prompts.update_log(id='id', log_id='log_id', )
+        """
+        _response = self._raw_client.update_log(
+            id,
+            log_id,
+            output_message=output_message,
+            prompt_tokens=prompt_tokens,
+            reasoning_tokens=reasoning_tokens,
+            output_tokens=output_tokens,
+            prompt_cost=prompt_cost,
+            output_cost=output_cost,
+            finish_reason=finish_reason,
+            messages=messages,
+            tool_choice=tool_choice,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def call_stream(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
+        prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        prompts_call_stream_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        num_samples: typing.Optional[int] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        logprobs: typing.Optional[int] = OMIT,
+        suffix: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.Iterator[PromptCallStreamResponse]:
+        """
+        Call a Prompt.
+
+        Calling a Prompt calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Prompt details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Prompt details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
+
+        prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+            The Prompt configuration to use. Two formats are supported:
+            - An object representing the details of the Prompt configuration
+            - A string representing the raw contents of a .prompt file
+            A new Prompt version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        prompts_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        num_samples : typing.Optional[int]
+            The number of generations.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        logprobs : typing.Optional[int]
+            Include the log probabilities of the top n tokens in the provider_response.
+
+        suffix : typing.Optional[str]
+            The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.Iterator[PromptCallStreamResponse]
+
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.prompts.call_stream()
+        for chunk in response:
+            print(chunk)
+        """
+        with self._raw_client.call_stream(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            messages=messages,
+            tool_choice=tool_choice,
+            prompt=prompt,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            prompts_call_stream_request_environment=prompts_call_stream_request_environment,
+            save=save,
+            log_id=log_id,
+            provider_api_keys=provider_api_keys,
+            num_samples=num_samples,
+            return_inputs=return_inputs,
+            logprobs=logprobs,
+            suffix=suffix,
+            request_options=request_options,
+        ) as r:
+            yield from r.data
+
+    def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
+        prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        prompts_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        num_samples: typing.Optional[int] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        logprobs: typing.Optional[int] = OMIT,
+        suffix: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PromptCallResponse:
+        """
+        Call a Prompt.
+
+        Calling a Prompt calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Prompt details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Prompt details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
+
+        prompt : typing.Optional[PromptsCallRequestPromptParams]
+            The Prompt configuration to use. Two formats are supported:
+            - An object representing the details of the Prompt configuration
+            - A string representing the raw contents of a .prompt file
+            A new Prompt version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        prompts_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        num_samples : typing.Optional[int]
+            The number of generations.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        logprobs : typing.Optional[int]
+            Include the log probabilities of the top n tokens in the provider_response.
+
+        suffix : typing.Optional[str]
+            The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PromptCallResponse
+
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object', 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}, 'required': []}}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
+        """
+        _response = self._raw_client.call(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            messages=messages,
+            tool_choice=tool_choice,
+            prompt=prompt,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            prompts_call_request_environment=prompts_call_request_environment,
+            save=save,
+            log_id=log_id,
+            provider_api_keys=provider_api_keys,
+            num_samples=num_samples,
+            return_inputs=return_inputs,
+            logprobs=logprobs,
+            suffix=suffix,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[FileSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SyncPager[PromptResponse]:
+        """
+        Get a list of all Prompts.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Prompts to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Prompt name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Prompts by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SyncPager[PromptResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.prompts.list(size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+        """
+        return self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    def upsert(
+        self,
+        *,
+        model: str,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        endpoint: typing.Optional[ModelEndpoints] = OMIT,
+        template: typing.Optional[PromptRequestTemplateParams] = OMIT,
+        template_language: typing.Optional[TemplateLanguage] = OMIT,
+        provider: typing.Optional[ModelProviders] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        stop: typing.Optional[PromptRequestStopParams] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        response_format: typing.Optional[ResponseFormatParams] = OMIT,
+        reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
+        tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
+        linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        tags: typing.Optional[typing.Sequence[str]] = OMIT,
+        readme: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PromptResponse:
+        """
+        Create a Prompt or update it with a new version if it already exists.
+
+        Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Prompt - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        model : str
+            The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models).
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        endpoint : typing.Optional[ModelEndpoints]
+            The provider model endpoint used.
+
+        template : typing.Optional[PromptRequestTemplateParams]
+            The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+            For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+            For completion models, provide a prompt template as a string.
+
+            Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[PromptRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[PromptRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
+ linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptResponse:
+ """
+ Create a Prompt or update it with a new version if it already exists.
+
+ Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within a Prompt - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[PromptRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[PromptRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
+ The tool specification that the model can choose to call if Tool calling is supported.
+
+ linked_tools : typing.Optional[typing.Sequence[str]]
+ The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+
+ version_name : typing.Optional[str]
+ Unique name for the Prompt version. Version names must be unique for a given Prompt.
+
+ version_description : typing.Optional[str]
+ Description of the version, e.g., the changes made in this version.
+
+ description : typing.Optional[str]
+ Description of the Prompt.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this prompt.
+
+ readme : typing.Optional[str]
+ Long description of the Prompt.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+ + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', ) + """ + _response = self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + linked_tools=linked_tools, + attributes=attributes, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, + request_options=request_options, + ) + return _response.data + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Retrieve the Prompt with the given ID. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', ) + """ + _response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Prompt with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Move the Prompt to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + path : typing.Optional[str] + Path of the Prompt including the Prompt name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', ) + """ + _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + def populate( + self, + id: str, + *, + request: typing.Dict[str, typing.Optional[typing.Any]], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PopulateTemplateResponse: + """ + Retrieve the Prompt with the given ID, including the populated template. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request : typing.Dict[str, typing.Optional[typing.Any]] + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve to populate the template. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from to populate the template. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PopulateTemplateResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.populate(id='id', request={'key': 'value' + }, ) + """ + _response = self._raw_client.populate( + id, request=request, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListPrompts: + """ + Get a list of all the versions of a Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListPrompts + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', ) + """ + _response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.delete_prompt_version(id='id', version_id='version_id', ) + """ + _response = self._raw_client.delete_prompt_version(id, version_id, request_options=request_options) + return _response.data + + def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Update the name or description of the Prompt version. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.patch_prompt_version(id='id', version_id='version_id', ) + """ + _response = self._raw_client.patch_prompt_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> PromptResponse: + """ + Deploy Prompt to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) + """ + _response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Prompt from the Environment. + + Remove the deployed version for the specified Environment. This Prompt + will no longer be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.remove_deployment(id='id', environment_id='environment_id', ) + """ + _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', ) + """ + _response = self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Activate and deactivate Evaluators for monitoring the Prompt. + + An activated Evaluator will automatically be run on all new Logs + within the Prompt for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) + """ + _response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Serialize a Prompt to the .prompt file format. + + Useful for storing the Prompt with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.serialize(id='id', )
+ """
+ _response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return _response.data
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset comprises the attributes that define the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.deserialize(prompt='prompt', )
+ """
+ _response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return _response.data
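`serialize` and `deserialize` together support a version-control round trip through the .prompt file format. A minimal sketch, assuming the Prompt ID from the earlier examples and an illustrative local file name; the `model` and `temperature` fields on `PromptKernelRequest` follow the docstring above.

from pathlib import Path

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Export the deployed version to a .prompt file for version control.
serialized = client.prompts.serialize(id="pr_30gco7dx6JDq4200GVOHa")
Path("persona.prompt").write_text(serialized)

# Later, recover the version-defining attributes from the file.
kernel = client.prompts.deserialize(prompt=Path("persona.prompt").read_text())
print(kernel.model, kernel.temperature)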
+
+
+ class AsyncPromptsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._raw_client = AsyncRawPromptsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> AsyncRawPromptsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ AsyncRawPromptsClient
+ """
+ return self._raw_client
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ prompt_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreatePromptLogResponse:
+ """
+ Log to a Prompt.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ prompt_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreatePromptLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import datetime
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'
+ }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
+ asyncio.run(main())
+ """
+ _response = await self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ prompt=prompt,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ prompt_log_request_environment=prompt_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return _response.data
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.prompts.update_log(id='id', log_id='log_id', )
+ asyncio.run(main())
+ """
+ _response = await self._raw_client.update_log(
+ id,
+ log_id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return _response.data
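The `log_status` lifecycle described above pairs `log` with `update_log`: open a Log as `incomplete`, then mark it `complete` once your provider call resolves. A minimal sketch; the `prompt_id` and `id` fields read off the response are assumptions about `CreatePromptLogResponse`.

import asyncio
import datetime as dt

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    # Open an incomplete Log so monitoring Evaluators wait for completion.
    created = await client.prompts.log(
        path="persona",
        log_status="incomplete",
        start_time=dt.datetime.now(dt.timezone.utc),
    )
    # ... make the provider call yourself, then close the Log out.
    await client.prompts.update_log(
        id=created.prompt_id,  # assumed field on CreatePromptLogResponse
        log_id=created.id,  # assumed field on CreatePromptLogResponse
        output="model output goes here",
        log_status="complete",
        end_time=dt.datetime.now(dt.timezone.utc),
    )

asyncio.run(main())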
+
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ prompts_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ num_samples: typing.Optional[int] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ logprobs: typing.Optional[int] = OMIT,
+ suffix: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[PromptCallStreamResponse]:
+ """
+ Call a Prompt.
+
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ num_samples : typing.Optional[int]
+ The number of generations.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
+
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[PromptCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ response = client.prompts.call_stream()
+ async for chunk in response:
+ print(chunk)
+ asyncio.run(main())
+ """
+ async with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ prompt=prompt,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ prompts_call_stream_request_environment=prompts_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ num_samples=num_samples,
+ return_inputs=return_inputs,
+ logprobs=logprobs,
+ suffix=suffix,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
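Because `call_stream` is an async generator, it is iterated rather than awaited, as in the docstring example above. A minimal sketch that also forces a specific tool using the `tool_choice` shape documented above; the tool name is illustrative.

import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    stream = client.prompts.call_stream(
        path="persona",
        messages=[{"role": "user", "content": "latest apple"}],
        # Force the named tool (name is illustrative), per the options above.
        tool_choice={"type": "function", "function": {"name": "get_stock_price"}},
    )
    async for chunk in stream:
        print(chunk)

asyncio.run(main())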
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ prompts_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ num_samples: typing.Optional[int] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ logprobs: typing.Optional[int] = OMIT,
+ suffix: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptCallResponse:
+ """
+ Call a Prompt.
+
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ num_samples : typing.Optional[int]
+ The number of generations.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
+
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptCallResponse
+
+
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
+ , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
+ , 'required': []
+ }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
+ asyncio.run(main())
+ """
+ _response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ prompt=prompt,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ prompts_call_request_environment=prompts_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ num_samples=num_samples,
+ return_inputs=return_inputs,
+ logprobs=logprobs,
+ suffix=suffix,
+ request_options=request_options,
+ )
+ return _response.data
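A minimal sketch of targeting the version deployed to a named Environment rather than passing inline Prompt details; the Environment name is illustrative, and reading the output via `response.logs[0].output_message` is an assumption about the `PromptCallResponse` model.

import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    response = await client.prompts.call(
        path="persona",
        environment="production",  # illustrative Environment name
        messages=[{"role": "user", "content": "What really happened at Roswell?"}],
    )
    # Assumed response shape; adjust to the PromptCallResponse model.
    print(response.logs[0].output_message.content)

asyncio.run(main())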
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[PromptResponse]:
+ """
+ Get a list of all Prompts.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Prompts to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Prompt name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Prompts by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[PromptResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ response = await client.prompts.list(size=1, )
+ async for item in response:
+ print(item)
+
+ # alternatively, you can paginate page-by-page
+ async for page in response.iter_pages():
+ print(page)
+ asyncio.run(main())
+ """
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[PromptRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[PromptRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
+ linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptResponse:
+ """
+ Create a Prompt or update it with a new version if it already exists.
+
+ Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within a Prompt - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[PromptRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[PromptRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
+ The tool specification that the model can choose to call if Tool calling is supported.
+
+ linked_tools : typing.Optional[typing.Sequence[str]]
+ The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+
+ version_name : typing.Optional[str]
+ Unique name for the Prompt version. Version names must be unique for a given Prompt.
+
+ version_description : typing.Optional[str]
+ Description of the version, e.g., the changes made in this version.
+
+ description : typing.Optional[str]
+ Description of the Prompt.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this prompt.
+
+ readme : typing.Optional[str]
+ Long description of the Prompt.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+ + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', ) + asyncio.run(main()) + """ + _response = await self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + linked_tools=linked_tools, + attributes=attributes, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, + request_options=request_options, + ) + return _response.data + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Retrieve the Prompt with the given ID. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Prompt with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete(id, request_options=request_options) + return _response.data + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Move the Prompt to a different path or change the name. 
+ + Parameters + ---------- + id : str + Unique identifier for Prompt. + + path : typing.Optional[str] + Path of the Prompt including the Prompt name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', ) + asyncio.run(main()) + """ + _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + async def populate( + self, + id: str, + *, + request: typing.Dict[str, typing.Optional[typing.Any]], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PopulateTemplateResponse: + """ + Retrieve the Prompt with the given ID, including the populated template. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request : typing.Dict[str, typing.Optional[typing.Any]] + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve to populate the template. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from to populate the template. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PopulateTemplateResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.populate(id='id', request={'key': 'value' + }, ) + asyncio.run(main()) + """ + _response = await self._raw_client.populate( + id, request=request, version_id=version_id, environment=environment, request_options=request_options + ) + return _response.data + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListPrompts: + """ + Get a list of all the versions of a Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListPrompts + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + async def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Prompt. 
+ + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.delete_prompt_version(id='id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete_prompt_version(id, version_id, request_options=request_options) + return _response.data + + async def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Update the name or description of the Prompt version. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.patch_prompt_version(id='id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.patch_prompt_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return _response.data + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> PromptResponse: + """ + Deploy Prompt to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return _response.data + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Prompt from the Environment. + + Remove the deployed version for the specified Environment. This Prompt + will no longer be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.remove_deployment(id='id', environment_id='environment_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return _response.data + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: + """ + Activate and deactivate Evaluators for monitoring the Prompt. + + An activated Evaluator will automatically be run on all new Logs + within the Prompt for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PromptResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + async def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> str: + """ + Serialize a Prompt to the .prompt file format. + + Useful for storing the Prompt with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. 
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Prompt.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        str
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.prompts.serialize(id='id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.serialize(
+            id, version_id=version_id, environment=environment, request_options=request_options
+        )
+        return _response.data
+
+    async def deserialize(
+        self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> PromptKernelRequest:
+        """
+        Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        prompt : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PromptKernelRequest
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.prompts.deserialize(prompt='prompt', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+        return _response.data
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
new file mode 100644
index 00000000..5d12b08e
--- /dev/null
+++ b/src/humanloop/prompts/raw_client.py
@@ -0,0 +1,3977 @@
+# This file was auto-generated by Fern from our API Definition.
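+
+# A minimal usage sketch (not part of the generated API surface; it assumes the
+# `_raw_client` attribute wired up by the generated wrappers, and the example
+# Prompt ID is a placeholder taken from the docstrings above). Each raw method
+# below returns an HttpResponse whose `.data` attribute holds the parsed value;
+# the wrapper clients in client.py delegate here and return `_response.data`:
+#
+#   import asyncio
+#   from humanloop import AsyncHumanloop
+#
+#   async def main() -> None:
+#       client = AsyncHumanloop(api_key="YOUR_API_KEY")
+#       prompt = await client.prompts.get(id="pr_30gco7dx6JDq4200GVOHa")
+#       # The same call through the raw client keeps the HTTP wrapper:
+#       http_response = await client.prompts._raw_client.get(id="pr_30gco7dx6JDq4200GVOHa")
+#       prompt = http_response.data
+#
+#   asyncio.run(main())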
+ +import contextlib +import datetime as dt +import json +import typing +from json.decoder import JSONDecodeError + +import httpx_sse +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager +from ..core.request_options import RequestOptions +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..requests.chat_message import ChatMessageParams +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..requests.response_format import ResponseFormatParams +from ..requests.tool_function import ToolFunctionParams +from ..types.create_prompt_log_response import CreatePromptLogResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_sort_by import FileSortBy +from ..types.http_validation_error import HttpValidationError +from ..types.list_prompts import ListPrompts +from ..types.log_response import LogResponse +from ..types.log_status import LogStatus +from ..types.model_endpoints import ModelEndpoints +from ..types.model_providers import ModelProviders +from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse +from ..types.populate_template_response import PopulateTemplateResponse +from ..types.prompt_call_response import PromptCallResponse +from ..types.prompt_call_stream_response import PromptCallStreamResponse +from ..types.prompt_kernel_request import PromptKernelRequest +from ..types.prompt_response import PromptResponse +from ..types.sort_order import SortOrder +from ..types.template_language import TemplateLanguage +from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams +from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams +from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams +from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams +from .requests.prompt_request_stop import PromptRequestStopParams +from .requests.prompt_request_template import PromptRequestTemplateParams +from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams +from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams +from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams +from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
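+
+# OMIT is the Ellipsis sentinel: parameters left at this default are excluded
+# from the JSON body via the `omit=OMIT` argument passed to the HTTP client,
+# which distinguishes "not provided" from an explicit None/null. A minimal
+# sketch of the idea (`_drop_omitted` is a hypothetical helper, not part of
+# this module):
+#
+#   def _drop_omitted(body: dict, omit: object = OMIT) -> dict:
+#       return {key: value for key, value in body.items() if value is not omit}
+#
+#   _drop_omitted({"path": "folder/name", "id": OMIT})
+#   # -> {"path": "folder/name"}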
+
+
+class RawPromptsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        run_id: typing.Optional[str] = OMIT,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        prompt_tokens: typing.Optional[int] = OMIT,
+        reasoning_tokens: typing.Optional[int] = OMIT,
+        output_tokens: typing.Optional[int] = OMIT,
+        prompt_cost: typing.Optional[float] = OMIT,
+        output_cost: typing.Optional[float] = OMIT,
+        finish_reason: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
+        prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        prompt_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[CreatePromptLogResponse]:
+        """
+        Log to a Prompt.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Prompt details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Prompt details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        prompt : typing.Optional[PromptLogRequestPromptParams]
+            The Prompt configuration to use. Two formats are supported:
+            - An object representing the details of the Prompt configuration
+            - A string representing the raw contents of a .prompt file
+            A new Prompt version will be created if the provided details do not match any existing version.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to the provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+ + user : typing.Optional[str] + End-user ID related to the Log. + + prompt_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreatePromptLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "prompts/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptLogRequestPromptParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompt_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreatePromptLogResponse, + construct_type( + type_=CreatePromptLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_log( + self, + id: str, + log_id: str, + *, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] 
= OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[LogResponse]:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Prompt.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to the provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+ + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[LogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" + ), + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + @contextlib.contextmanager + def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: 
typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        prompts_call_stream_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        num_samples: typing.Optional[int] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        logprobs: typing.Optional[int] = OMIT,
+        suffix: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]:
+        """
+        Call a Prompt.
+
+        Calling a Prompt calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Prompt details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Prompt details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+            The Prompt configuration to use. Two formats are supported:
+            - An object representing the details of the Prompt configuration
+            - A string representing the raw contents of a .prompt file
+            A new Prompt version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        prompts_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        num_samples : typing.Optional[int]
+            The number of generations.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        logprobs : typing.Optional[int]
+            Include the log probabilities of the top n tokens in the provider_response
+
+        suffix : typing.Optional[str]
+            The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]
+
+        """
+        with self._client_wrapper.httpx_client.stream(
+            "prompts/call",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "path": path,
+                "id": id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "tool_choice": convert_and_respect_annotation_metadata(
+                    object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
+                ),
+                "prompt": convert_and_respect_annotation_metadata(
+                    object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
+                ),
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "start_time": start_time,
+                "end_time": end_time,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": prompts_call_stream_request_environment,
+                "save": save,
+                "log_id": log_id,
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "num_samples": num_samples,
+                "return_inputs": return_inputs,
+                "logprobs": logprobs,
+                "suffix": suffix,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            def stream() -> HttpResponse[typing.Iterator[PromptCallStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            for _sse in _event_source.iter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        PromptCallStreamResponse,
+                                        construct_type(
+                                            type_=PromptCallStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return HttpResponse(response=_response, data=_iter())
+                    _response.read()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            headers=dict(_response.headers),
+                            body=typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            ),
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(
+                        status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+                    )
+                raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+            yield stream()
+
+    def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
+        prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        prompts_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        num_samples: typing.Optional[int] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        logprobs: typing.Optional[int] = OMIT,
+        suffix: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[PromptCallResponse]:
+        """
+        Call a Prompt.
+
+        Calling a Prompt calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Prompt details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Prompt details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        prompt : typing.Optional[PromptsCallRequestPromptParams]
+            The Prompt configuration to use. Two formats are supported:
+            - An object representing the details of the Prompt configuration
+            - A string representing the raw contents of a .prompt file
+            A new Prompt version will be created if the provided details do not match any existing version.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompts_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + num_samples : typing.Optional[int] + The number of generations. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + logprobs : typing.Optional[int] + Include the log probabilities of the top n tokens in the provider_response + + suffix : typing.Optional[str] + The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PromptCallResponse] + + """ + _response = self._client_wrapper.httpx_client.request( + "prompts/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompts_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "num_samples": num_samples, + "return_inputs": return_inputs, + "logprobs": logprobs, + "suffix": suffix, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptCallResponse, + construct_type( + type_=PromptCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + 
construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[PromptResponse]: + """ + Get a list of all Prompts. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Prompts to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Prompt name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Prompts by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SyncPager[PromptResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + "prompts", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataPromptResponse, + construct_type( + type_=PaginatedDataPromptResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[PromptRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[PromptRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        response_format: typing.Optional[ResponseFormatParams] = OMIT,
+        reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
+        tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
+        linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        tags: typing.Optional[typing.Sequence[str]] = OMIT,
+        readme: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[PromptResponse]:
+        """
+        Create a Prompt or update it with a new version if it already exists.
+
+        Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model, etc.) determine the versions of the Prompt.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Prompt - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        model : str
+            The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+        path : typing.Optional[str]
+            Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Prompt.
+
+        endpoint : typing.Optional[ModelEndpoints]
+            The provider model endpoint used.
+
+        template : typing.Optional[PromptRequestTemplateParams]
+            The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+            For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+            For completion models, provide a prompt template as a string.
+
+            Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+        template_language : typing.Optional[TemplateLanguage]
+            The template language to use for rendering the template.
+
+        provider : typing.Optional[ModelProviders]
+            The company providing the underlying model service.
+
+        max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+        top_p : typing.Optional[float]
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+        stop : typing.Optional[PromptRequestStopParams]
+            The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+        presence_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+        frequency_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+        other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Other parameter values to be passed to the provider call.
+
+        seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+        response_format : typing.Optional[ResponseFormatParams]
+            The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+        reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+        tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
+            The tool specification that the model can choose to call if Tool calling is supported.
+
+        linked_tools : typing.Optional[typing.Sequence[str]]
+            The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that Tool is called.
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+
+        version_name : typing.Optional[str]
+            Unique name for the Prompt version. Version names must be unique for a given Prompt.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        description : typing.Optional[str]
+            Description of the Prompt.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Prompt.
+
+        readme : typing.Optional[str]
+            Long description of the Prompt.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "prompts", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=PromptRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=PromptRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": convert_and_respect_annotation_metadata( + object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write" + ), + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" + ), + "linked_tools": linked_tools, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Retrieve the Prompt with the given ID. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
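+
+ Examples
+ --------
+ A minimal, hypothetical sketch (placeholder ID; assumes a configured
+ `Humanloop` client and Fern's `with_raw_response` accessor):
+
+ response = client.prompts.with_raw_response.get(id="pr_1234567890")
+ print(response.data.path)  # response.data is the parsed PromptResponse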
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Prompt with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Move the Prompt to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + path : typing.Optional[str] + Path of the Prompt including the Prompt name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
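+
+ Examples
+ --------
+ Hypothetical usage with placeholder values, assuming the same configured
+ client as in the examples above:
+
+ response = client.prompts.with_raw_response.move(
+     id="pr_1234567890",
+     path="archive/my-prompt",
+ )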
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def populate( + self, + id: str, + *, + request: typing.Dict[str, typing.Optional[typing.Any]], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PopulateTemplateResponse]: + """ + Retrieve the Prompt with the given ID, including the populated template. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request : typing.Dict[str, typing.Optional[typing.Any]] + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve to populate the template. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from to populate the template. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PopulateTemplateResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/populate", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json=request, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PopulateTemplateResponse, + construct_type( + type_=PopulateTemplateResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListPrompts]: + """ + Get a list of all the versions of a Prompt. 
+ + Parameters + ---------- + id : str + Unique identifier for Prompt. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListPrompts] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListPrompts, + construct_type( + type_=ListPrompts, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Update the name or description of the Prompt version. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
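+
+ Examples
+ --------
+ Sketch only; the Prompt ID and Version ID below are placeholders:
+
+ response = client.prompts.with_raw_response.patch_prompt_version(
+     id="pr_1234567890",
+     version_id="prv_1234567890",
+     name="v2-tuned",
+     description="Lowered temperature after review.",
+ )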
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[PromptResponse]: + """ + Deploy Prompt to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Prompt from the Environment. + + Remove the deployed version for the specified Environment. This Prompt + will no longer be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
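+
+ Examples
+ --------
+ Illustrative only (placeholder IDs; assumes a configured client):
+
+ response = client.prompts.with_raw_response.remove_deployment(
+     id="pr_1234567890",
+     environment_id="env_1234567890",
+ )
+ assert response.data is None  # a successful call returns an empty body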
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Activate and deactivate Evaluators for monitoring the Prompt. + + An activated Evaluator will automatically be run on all new Logs + within the Prompt for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
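+
+ Examples
+ --------
+ A sketch under stated assumptions: the activate item shape shown
+ (`evaluator_version_id`) is one plausible member of the request union,
+ and the IDs are placeholders.
+
+ response = client.prompts.with_raw_response.update_monitoring(
+     id="pr_1234567890",
+     activate=[{"evaluator_version_id": "evv_1234567890"}],
+ )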
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[str]: + """ + Serialize a Prompt to the .prompt file format. + + Useful for storing the Prompt with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[str] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/serialize", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=_response.text) # type: ignore + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def deserialize( + self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[PromptKernelRequest]: + """ + Deserialize a Prompt from the .prompt file format. 
+
+ This returns the subset of a Prompt's attributes that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+
+ class AsyncRawPromptsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ prompt_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreatePromptLogResponse]:
+ """
+ Log to a Prompt.
+
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to the provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ prompt_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
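+
+ Examples
+ --------
+ Hypothetical async usage; assumes the package exports an async client
+ (here called `AsyncHumanloop`) mirroring the sync one, and uses
+ placeholder values throughout.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ async def main() -> None:
+     client = AsyncHumanloop(api_key="YOUR_API_KEY")
+     response = await client.prompts.with_raw_response.log(
+         path="example/my-prompt",
+         inputs={"question": "What is a Prompt version?"},
+         output="A Prompt version is ...",
+         save=True,
+     )
+     print(response.data.id)  # ID of the created Log
+
+ asyncio.run(main())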
+ + Returns + ------- + AsyncHttpResponse[CreatePromptLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptLogRequestPromptParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompt_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreatePromptLogResponse, + construct_type( + type_=CreatePromptLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_log( + self, + id: str, + log_id: str, + *, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to the provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`.
If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[LogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" + ), + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + @contextlib.asynccontextmanager + async def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompts_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + num_samples: typing.Optional[int] = 
OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ logprobs: typing.Optional[int] = OMIT,
+ suffix: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]:
+ """
+ Call a Prompt.
+
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ num_samples : typing.Optional[int]
+ The number of generations.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
+
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]
+
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "prompts/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "prompt": convert_and_respect_annotation_metadata(
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": prompts_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "num_samples": num_samples,
+ "return_inputs": return_inputs,
+ "logprobs": logprobs,
+ "suffix": suffix,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ try:
+ yield typing.cast(
+ PromptCallStreamResponse,
+ construct_type(
+ type_=PromptCallStreamResponse, # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ yield await stream()
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ prompts_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ num_samples: typing.Optional[int] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ logprobs: typing.Optional[int] = OMIT,
+ suffix: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PromptCallResponse]:
+ """
+ Call a Prompt.
+
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Prompt.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ num_samples : typing.Optional[int]
+ The number of generations.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
+
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
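+
+ Examples
+ --------
+ Illustrative async sketch (placeholder path and message; assumes a
+ configured async client as in the `log` example above):
+
+ response = await client.prompts.with_raw_response.call(
+     path="example/my-prompt",
+     messages=[{"role": "user", "content": "Summarise our returns policy."}],
+     num_samples=1,
+ )
+ print(response.data)  # parsed PromptCallResponse, including Log details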
+ + Returns + ------- + AsyncHttpResponse[PromptCallResponse] + + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompts_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "num_samples": num_samples, + "return_inputs": return_inputs, + "logprobs": logprobs, + "suffix": suffix, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptCallResponse, + construct_type( + type_=PromptCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[PromptResponse]: + """ + Get a list of all Prompts. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Prompts to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Prompt name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Prompts by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
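+
+ Examples
+ --------
+ Hypothetical pagination sketch; assumes the returned `AsyncPager` is
+ async-iterable across pages (it exposes `has_next`/`get_next` internally)
+ and a configured async client as above.
+
+ pager = await client.prompts.with_raw_response.list(size=10)
+ async for prompt in pager:
+     print(prompt.path)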
+ + Returns + ------- + AsyncPager[PromptResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + "prompts", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataPromptResponse, + construct_type( + type_=PaginatedDataPromptResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[PromptRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[PromptRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT, + tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, + linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Create a Prompt or update it with a new version if it already exists. + + Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Prompt - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models) + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[PromptRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[PromptRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams] + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + + tools : typing.Optional[typing.Sequence[ToolFunctionParams]] + The tool specification that the model can choose to call if Tool calling is supported. + + linked_tools : typing.Optional[typing.Sequence[str]] + The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Prompt.
Helpful to separate Prompt versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Prompt version. Version names must be unique for a given Prompt. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + description : typing.Optional[str] + Description of the Prompt. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this prompt. + + readme : typing.Optional[str] + Long description of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=PromptRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=PromptRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": convert_and_respect_annotation_metadata( + object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write" + ), + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" + ), + "linked_tools": linked_tools, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Retrieve the Prompt with the given ID. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. 
+ + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Prompt with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Move the Prompt to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + path : typing.Optional[str] + Path of the Prompt including the Prompt name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
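+
+        Examples
+        --------
+        An illustrative sketch (editorial addition, not generated output); the
+        Prompt ID and target path below are placeholders:
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.with_raw_response.move(
+                id="pr_1234567890",
+                path="archive/assistant",
+            )
+
+
+        asyncio.run(main())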
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def populate( + self, + id: str, + *, + request: typing.Dict[str, typing.Optional[typing.Any]], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PopulateTemplateResponse]: + """ + Retrieve the Prompt with the given ID, including the populated template. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request : typing.Dict[str, typing.Optional[typing.Any]] + The input values used to populate the Prompt template. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve to populate the template. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from to populate the template. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
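+
+        Examples
+        --------
+        An illustrative sketch (editorial addition, not generated output); it
+        assumes the Prompt template declares a `{{topic}}` input variable:
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            response = await client.prompts.with_raw_response.populate(
+                id="pr_1234567890",  # placeholder Prompt ID
+                request={"topic": "dogs"},
+            )
+            print(response.data.populated_template)
+
+
+        asyncio.run(main())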
+ + Returns + ------- + AsyncHttpResponse[PopulateTemplateResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/populate", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json=request, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PopulateTemplateResponse, + construct_type( + type_=PopulateTemplateResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListPrompts]: + """ + Get a list of all the versions of a Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListPrompts] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListPrompts, + construct_type( + type_=ListPrompts, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Update the name or description of the Prompt version. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[PromptResponse]: + """ + Deploy Prompt to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
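+
+        Examples
+        --------
+        An illustrative sketch (editorial addition, not generated output); all
+        identifiers below are placeholders:
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.with_raw_response.set_deployment(
+                id="pr_1234567890",
+                environment_id="env_1234567890",
+                version_id="prv_1234567890",
+            )
+
+
+        asyncio.run(main())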
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Prompt from the Environment. + + Remove the deployed version for the specified Environment. This Prompt + will no longer be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
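+
+        Examples
+        --------
+        An illustrative sketch (editorial addition, not generated output); it
+        assumes FileEnvironmentResponse exposes a `name` field:
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            response = await client.prompts.with_raw_response.list_environments(
+                id="pr_1234567890",  # placeholder Prompt ID
+            )
+            for environment in response.data:
+                print(environment.name)
+
+
+        asyncio.run(main())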
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Activate and deactivate Evaluators for monitoring the Prompt. + + An activated Evaluator will automatically be run on all new Logs + within the Prompt for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
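+
+        Examples
+        --------
+        An illustrative sketch (editorial addition, not generated output). It
+        assumes an activation item can pin a specific Evaluator version via an
+        `evaluator_version_id` key; all identifiers are placeholders:
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.with_raw_response.update_monitoring(
+                id="pr_1234567890",
+                activate=[{"evaluator_version_id": "evv_1234567890"}],
+            )
+
+
+        asyncio.run(main())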
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[str]: + """ + Serialize a Prompt to the .prompt file format. + + Useful for storing the Prompt with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
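+
+        Examples
+        --------
+        An illustrative sketch (editorial addition, not generated output); the
+        returned text can be fed back through `deserialize` below:
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            response = await client.prompts.with_raw_response.serialize(
+                id="pr_1234567890",  # placeholder Prompt ID
+            )
+            # response.data is the raw .prompt file contents as a string
+            print(response.data)
+
+
+        asyncio.run(main())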
+ + Returns + ------- + AsyncHttpResponse[str] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/serialize", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=_response.text) # type: ignore + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def deserialize( + self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[PromptKernelRequest]: + """ + Deserialize a Prompt from the .prompt file format. + + This returns a subset of the attributes required by a Prompt. + This subset defines the Prompt version (e.g. `model`, `temperature`, etc.). + + Parameters + ---------- + prompt : str + The raw contents of the Prompt in the .prompt file format. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PromptKernelRequest] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts/deserialize", + method="POST", + json={ + "prompt": prompt, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptKernelRequest, + construct_type( + type_=PromptKernelRequest, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py new file mode 100644 index 00000000..67f6233e --- /dev/null +++ b/src/humanloop/prompts/requests/__init__.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition.
+ +# isort: skip_file + +from .prompt_log_request_prompt import PromptLogRequestPromptParams +from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams +from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams +from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams +from .prompt_request_stop import PromptRequestStopParams +from .prompt_request_template import PromptRequestTemplateParams +from .prompts_call_request_prompt import PromptsCallRequestPromptParams +from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams +from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams +from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams + +__all__ = [ + "PromptLogRequestPromptParams", + "PromptLogRequestToolChoiceParams", + "PromptLogUpdateRequestToolChoiceParams", + "PromptRequestReasoningEffortParams", + "PromptRequestStopParams", + "PromptRequestTemplateParams", + "PromptsCallRequestPromptParams", + "PromptsCallRequestToolChoiceParams", + "PromptsCallStreamRequestPromptParams", + "PromptsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py new file mode 100644 index 00000000..18417e47 --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.prompt_kernel_request import PromptKernelRequestParams + +PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str] diff --git a/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py b/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py new file mode 100644 index 00000000..eb1a3a0d --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.tool_choice import ToolChoiceParams + +PromptLogRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py b/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py new file mode 100644 index 00000000..18598c0a --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.tool_choice import ToolChoiceParams + +PromptLogUpdateRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py new file mode 100644 index 00000000..c40a1fdd --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
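+
+# Editorial note (not generated): per the Prompt docstrings above, OpenAI
+# reasoning models take an OpenAiReasoningEffort enum value, while Anthropic
+# reasoning models take an integer maximum token budget, e.g.:
+#
+#   effort_openai: PromptRequestReasoningEffortParams = "medium"  # assumes "medium" is an enum value
+#   effort_anthropic: PromptRequestReasoningEffortParams = 1024   # max token budget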
+ +import typing + +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/prompts/requests/prompt_request_stop.py b/src/humanloop/prompts/requests/prompt_request_stop.py new file mode 100644 index 00000000..ed7fd9c7 --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PromptRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/prompts/requests/prompt_request_template.py b/src/humanloop/prompts/requests/prompt_request_template.py new file mode 100644 index 00000000..51e6905d --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_request_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.chat_message import ChatMessageParams + +PromptRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py new file mode 100644 index 00000000..c9ef087f --- /dev/null +++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.prompt_kernel_request import PromptKernelRequestParams + +PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str] diff --git a/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py b/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py new file mode 100644 index 00000000..9a2e39ad --- /dev/null +++ b/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.tool_choice import ToolChoiceParams + +PromptsCallRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py new file mode 100644 index 00000000..f27fc93b --- /dev/null +++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...requests.prompt_kernel_request import PromptKernelRequestParams + +PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str] diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py b/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py new file mode 100644 index 00000000..d8e537d8 --- /dev/null +++ b/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
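+
+# Editorial note (not generated): this union accepts the literals "none",
+# "auto" and "required", or a function-style tool choice, e.g.:
+#
+#   tool_choice: PromptsCallStreamRequestToolChoiceParams = {
+#       "type": "function",
+#       "function": {"name": "my_tool"},  # "my_tool" is a hypothetical tool name
+#   }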
+ +import typing + +from ...requests.tool_choice import ToolChoiceParams + +PromptsCallStreamRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py new file mode 100644 index 00000000..964060c2 --- /dev/null +++ b/src/humanloop/prompts/types/__init__.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .prompt_log_request_prompt import PromptLogRequestPrompt +from .prompt_log_request_tool_choice import PromptLogRequestToolChoice +from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice +from .prompt_request_reasoning_effort import PromptRequestReasoningEffort +from .prompt_request_stop import PromptRequestStop +from .prompt_request_template import PromptRequestTemplate +from .prompts_call_request_prompt import PromptsCallRequestPrompt +from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice +from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt +from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice + +__all__ = [ + "PromptLogRequestPrompt", + "PromptLogRequestToolChoice", + "PromptLogUpdateRequestToolChoice", + "PromptRequestReasoningEffort", + "PromptRequestStop", + "PromptRequestTemplate", + "PromptsCallRequestPrompt", + "PromptsCallRequestToolChoice", + "PromptsCallStreamRequestPrompt", + "PromptsCallStreamRequestToolChoice", +] diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py new file mode 100644 index 00000000..6b1c1c5e --- /dev/null +++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.prompt_kernel_request import PromptKernelRequest + +PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str] diff --git a/src/humanloop/prompts/types/prompt_log_request_tool_choice.py b/src/humanloop/prompts/types/prompt_log_request_tool_choice.py new file mode 100644 index 00000000..f7c0c6e9 --- /dev/null +++ b/src/humanloop/prompts/types/prompt_log_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.tool_choice import ToolChoice + +PromptLogRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py b/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py new file mode 100644 index 00000000..0edb325c --- /dev/null +++ b/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from ...types.tool_choice import ToolChoice + +PromptLogUpdateRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py new file mode 100644 index 00000000..89eefb37 --- /dev/null +++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/prompts/types/prompt_request_stop.py b/src/humanloop/prompts/types/prompt_request_stop.py new file mode 100644 index 00000000..e2f6d535 --- /dev/null +++ b/src/humanloop/prompts/types/prompt_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PromptRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/prompts/types/prompt_request_template.py b/src/humanloop/prompts/types/prompt_request_template.py new file mode 100644 index 00000000..0e3dc1b4 --- /dev/null +++ b/src/humanloop/prompts/types/prompt_request_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.chat_message import ChatMessage + +PromptRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py new file mode 100644 index 00000000..98cb80c3 --- /dev/null +++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.prompt_kernel_request import PromptKernelRequest + +PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str] diff --git a/src/humanloop/prompts/types/prompts_call_request_tool_choice.py b/src/humanloop/prompts/types/prompts_call_request_tool_choice.py new file mode 100644 index 00000000..8fc2cad0 --- /dev/null +++ b/src/humanloop/prompts/types/prompts_call_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.tool_choice import ToolChoice + +PromptsCallRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py new file mode 100644 index 00000000..c623bcae --- /dev/null +++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...types.prompt_kernel_request import PromptKernelRequest + +PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str] diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py b/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py new file mode 100644 index 00000000..67b9e533 --- /dev/null +++ b/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from ...types.tool_choice import ToolChoice + +PromptsCallStreamRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/py.typed b/src/humanloop/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py new file mode 100644 index 00000000..a95f70ac --- /dev/null +++ b/src/humanloop/requests/__init__.py @@ -0,0 +1,339 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +from .agent_call_response import AgentCallResponseParams +from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams +from .agent_call_stream_response import AgentCallStreamResponseParams +from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams +from .agent_config_response import AgentConfigResponseParams +from .agent_continue_call_response import AgentContinueCallResponseParams +from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams +from .agent_continue_call_stream_response import AgentContinueCallStreamResponseParams +from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams +from .agent_inline_tool import AgentInlineToolParams +from .agent_kernel_request import AgentKernelRequestParams +from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams +from .agent_kernel_request_stop import AgentKernelRequestStopParams +from .agent_kernel_request_template import AgentKernelRequestTemplateParams +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams +from .agent_linked_file_request import AgentLinkedFileRequestParams +from .agent_linked_file_response import AgentLinkedFileResponseParams +from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams +from .agent_log_response import AgentLogResponseParams +from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams +from .agent_log_stream_response import AgentLogStreamResponseParams +from .agent_response import AgentResponseParams +from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams +from .agent_response_stop import AgentResponseStopParams +from .agent_response_template import AgentResponseTemplateParams +from .agent_response_tools_item import AgentResponseToolsItemParams +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams +from .anthropic_thinking_content import AnthropicThinkingContentParams +from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams +from .chat_message import ChatMessageParams +from .chat_message_content import ChatMessageContentParams +from .chat_message_content_item import ChatMessageContentItemParams +from .chat_message_thinking_item import ChatMessageThinkingItemParams +from .code_evaluator_request import CodeEvaluatorRequestParams +from .create_agent_log_response import CreateAgentLogResponseParams +from .create_datapoint_request import CreateDatapointRequestParams +from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams +from .create_evaluator_log_response import CreateEvaluatorLogResponseParams +from .create_flow_log_response import CreateFlowLogResponseParams +from .create_prompt_log_response import CreatePromptLogResponseParams +from .create_tool_log_response import 
CreateToolLogResponseParams +from .dashboard_configuration import DashboardConfigurationParams +from .datapoint_response import DatapointResponseParams +from .datapoint_response_target_value import DatapointResponseTargetValueParams +from .dataset_response import DatasetResponseParams +from .directory_response import DirectoryResponseParams +from .directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponseParams +from .directory_with_parents_and_children_response_files_item import ( + DirectoryWithParentsAndChildrenResponseFilesItemParams, +) +from .environment_response import EnvironmentResponseParams +from .evaluatee_request import EvaluateeRequestParams +from .evaluatee_response import EvaluateeResponseParams +from .evaluation_evaluator_response import EvaluationEvaluatorResponseParams +from .evaluation_log_response import EvaluationLogResponseParams +from .evaluation_response import EvaluationResponseParams +from .evaluation_run_response import EvaluationRunResponseParams +from .evaluation_runs_response import EvaluationRunsResponseParams +from .evaluation_stats import EvaluationStatsParams +from .evaluator_activation_deactivation_request import EvaluatorActivationDeactivationRequestParams +from .evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from .evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from .evaluator_aggregate import EvaluatorAggregateParams +from .evaluator_config_response import EvaluatorConfigResponseParams +from .evaluator_file_id import EvaluatorFileIdParams +from .evaluator_file_path import EvaluatorFilePathParams +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams +from .evaluator_log_response import EvaluatorLogResponseParams +from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams +from .evaluator_response import EvaluatorResponseParams +from .evaluator_response_spec import EvaluatorResponseSpecParams +from .evaluator_version_id import EvaluatorVersionIdParams +from .external_evaluator_request import ExternalEvaluatorRequestParams +from .file_environment_response import FileEnvironmentResponseParams +from .file_environment_response_file import FileEnvironmentResponseFileParams +from .file_environment_variable_request import FileEnvironmentVariableRequestParams +from .file_id import FileIdParams +from .file_path import FilePathParams +from .file_request import FileRequestParams +from .flow_kernel_request import FlowKernelRequestParams +from .flow_log_response import FlowLogResponseParams +from .flow_response import FlowResponseParams +from .function_tool import FunctionToolParams +from .function_tool_choice import FunctionToolChoiceParams +from .http_validation_error import HttpValidationErrorParams +from .human_evaluator_request import HumanEvaluatorRequestParams +from .image_chat_content import ImageChatContentParams +from .image_url import ImageUrlParams +from .input_response import InputResponseParams +from .linked_file_request import LinkedFileRequestParams +from .linked_tool_response import LinkedToolResponseParams +from .list_agents import ListAgentsParams +from .list_datasets import ListDatasetsParams +from .list_evaluators import ListEvaluatorsParams +from .list_flows import ListFlowsParams +from .list_prompts import 
ListPromptsParams +from .list_tools import ListToolsParams +from .llm_evaluator_request import LlmEvaluatorRequestParams +from .log_response import LogResponseParams +from .log_stream_response import LogStreamResponseParams +from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams +from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams +from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams +from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams +from .overall_stats import OverallStatsParams +from .paginated_data_agent_response import PaginatedDataAgentResponseParams +from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams +from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams +from .paginated_data_flow_response import PaginatedDataFlowResponseParams +from .paginated_data_log_response import PaginatedDataLogResponseParams +from .paginated_data_prompt_response import PaginatedDataPromptResponseParams +from .paginated_data_tool_response import PaginatedDataToolResponseParams +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, +) +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, +) +from .paginated_datapoint_response import PaginatedDatapointResponseParams +from .paginated_dataset_response import PaginatedDatasetResponseParams +from .paginated_evaluation_response import PaginatedEvaluationResponseParams +from .populate_template_response import PopulateTemplateResponseParams +from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams +from .populate_template_response_stop import PopulateTemplateResponseStopParams +from .populate_template_response_template import PopulateTemplateResponseTemplateParams +from .prompt_call_log_response import PromptCallLogResponseParams +from .prompt_call_response import PromptCallResponseParams +from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams +from .prompt_call_stream_response import PromptCallStreamResponseParams +from .prompt_kernel_request import PromptKernelRequestParams +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams +from .prompt_kernel_request_stop import PromptKernelRequestStopParams +from .prompt_kernel_request_template import PromptKernelRequestTemplateParams +from .prompt_log_response import PromptLogResponseParams +from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams +from .prompt_response import PromptResponseParams +from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams +from .prompt_response_stop import PromptResponseStopParams +from .prompt_response_template import PromptResponseTemplateParams +from .provider_api_keys import ProviderApiKeysParams +from .response_format import ResponseFormatParams +from .run_stats_response import RunStatsResponseParams +from 
.run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams +from .run_version_response import RunVersionResponseParams +from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams +from .text_chat_content import TextChatContentParams +from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams +from .tool_call import ToolCallParams +from .tool_call_response import ToolCallResponseParams +from .tool_choice import ToolChoiceParams +from .tool_function import ToolFunctionParams +from .tool_kernel_request import ToolKernelRequestParams +from .tool_log_response import ToolLogResponseParams +from .tool_response import ToolResponseParams +from .update_version_request import UpdateVersionRequestParams +from .validation_error import ValidationErrorParams +from .validation_error_loc_item import ValidationErrorLocItemParams +from .version_deployment_response import VersionDeploymentResponseParams +from .version_deployment_response_file import VersionDeploymentResponseFileParams +from .version_id import VersionIdParams +from .version_id_response import VersionIdResponseParams +from .version_id_response_version import VersionIdResponseVersionParams +from .version_reference_response import VersionReferenceResponseParams +from .version_stats_response import VersionStatsResponseParams +from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams + +__all__ = [ + "AgentCallResponseParams", + "AgentCallResponseToolChoiceParams", + "AgentCallStreamResponseParams", + "AgentCallStreamResponsePayloadParams", + "AgentConfigResponseParams", + "AgentContinueCallResponseParams", + "AgentContinueCallResponseToolChoiceParams", + "AgentContinueCallStreamResponseParams", + "AgentContinueCallStreamResponsePayloadParams", + "AgentInlineToolParams", + "AgentKernelRequestParams", + "AgentKernelRequestReasoningEffortParams", + "AgentKernelRequestStopParams", + "AgentKernelRequestTemplateParams", + "AgentKernelRequestToolsItemParams", + "AgentLinkedFileRequestParams", + "AgentLinkedFileResponseFileParams", + "AgentLinkedFileResponseParams", + "AgentLogResponseParams", + "AgentLogResponseToolChoiceParams", + "AgentLogStreamResponseParams", + "AgentResponseParams", + "AgentResponseReasoningEffortParams", + "AgentResponseStopParams", + "AgentResponseTemplateParams", + "AgentResponseToolsItemParams", + "AnthropicRedactedThinkingContentParams", + "AnthropicThinkingContentParams", + "BooleanEvaluatorStatsResponseParams", + "ChatMessageContentItemParams", + "ChatMessageContentParams", + "ChatMessageParams", + "ChatMessageThinkingItemParams", + "CodeEvaluatorRequestParams", + "CreateAgentLogResponseParams", + "CreateDatapointRequestParams", + "CreateDatapointRequestTargetValueParams", + "CreateEvaluatorLogResponseParams", + "CreateFlowLogResponseParams", + "CreatePromptLogResponseParams", + "CreateToolLogResponseParams", + "DashboardConfigurationParams", + "DatapointResponseParams", + "DatapointResponseTargetValueParams", + "DatasetResponseParams", + "DirectoryResponseParams", + "DirectoryWithParentsAndChildrenResponseFilesItemParams", + "DirectoryWithParentsAndChildrenResponseParams", + "EnvironmentResponseParams", + "EvaluateeRequestParams", + "EvaluateeResponseParams", + "EvaluationEvaluatorResponseParams", + "EvaluationLogResponseParams", + "EvaluationResponseParams", + "EvaluationRunResponseParams", + "EvaluationRunsResponseParams", + "EvaluationStatsParams", + 
"EvaluatorActivationDeactivationRequestActivateItemParams", + "EvaluatorActivationDeactivationRequestDeactivateItemParams", + "EvaluatorActivationDeactivationRequestParams", + "EvaluatorAggregateParams", + "EvaluatorConfigResponseParams", + "EvaluatorFileIdParams", + "EvaluatorFilePathParams", + "EvaluatorJudgmentNumberLimitParams", + "EvaluatorJudgmentOptionResponseParams", + "EvaluatorLogResponseJudgmentParams", + "EvaluatorLogResponseParams", + "EvaluatorResponseParams", + "EvaluatorResponseSpecParams", + "EvaluatorVersionIdParams", + "ExternalEvaluatorRequestParams", + "FileEnvironmentResponseFileParams", + "FileEnvironmentResponseParams", + "FileEnvironmentVariableRequestParams", + "FileIdParams", + "FilePathParams", + "FileRequestParams", + "FlowKernelRequestParams", + "FlowLogResponseParams", + "FlowResponseParams", + "FunctionToolChoiceParams", + "FunctionToolParams", + "HttpValidationErrorParams", + "HumanEvaluatorRequestParams", + "ImageChatContentParams", + "ImageUrlParams", + "InputResponseParams", + "LinkedFileRequestParams", + "LinkedToolResponseParams", + "ListAgentsParams", + "ListDatasetsParams", + "ListEvaluatorsParams", + "ListFlowsParams", + "ListPromptsParams", + "ListToolsParams", + "LlmEvaluatorRequestParams", + "LogResponseParams", + "LogStreamResponseParams", + "MonitoringEvaluatorEnvironmentRequestParams", + "MonitoringEvaluatorResponseParams", + "MonitoringEvaluatorVersionRequestParams", + "NumericEvaluatorStatsResponseParams", + "OverallStatsParams", + "PaginatedDataAgentResponseParams", + "PaginatedDataEvaluationLogResponseParams", + "PaginatedDataEvaluatorResponseParams", + "PaginatedDataFlowResponseParams", + "PaginatedDataLogResponseParams", + "PaginatedDataPromptResponseParams", + "PaginatedDataToolResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", + "PaginatedDatapointResponseParams", + "PaginatedDatasetResponseParams", + "PaginatedEvaluationResponseParams", + "PopulateTemplateResponseParams", + "PopulateTemplateResponsePopulatedTemplateParams", + "PopulateTemplateResponseReasoningEffortParams", + "PopulateTemplateResponseStopParams", + "PopulateTemplateResponseTemplateParams", + "PromptCallLogResponseParams", + "PromptCallResponseParams", + "PromptCallResponseToolChoiceParams", + "PromptCallStreamResponseParams", + "PromptKernelRequestParams", + "PromptKernelRequestReasoningEffortParams", + "PromptKernelRequestStopParams", + "PromptKernelRequestTemplateParams", + "PromptLogResponseParams", + "PromptLogResponseToolChoiceParams", + "PromptResponseParams", + "PromptResponseReasoningEffortParams", + "PromptResponseStopParams", + "PromptResponseTemplateParams", + "ProviderApiKeysParams", + "ResponseFormatParams", + "RunStatsResponseEvaluatorStatsItemParams", + "RunStatsResponseParams", + "RunVersionResponseParams", + "SelectEvaluatorStatsResponseParams", + "TextChatContentParams", + "TextEvaluatorStatsResponseParams", + "ToolCallParams", + "ToolCallResponseParams", + "ToolChoiceParams", + "ToolFunctionParams", + "ToolKernelRequestParams", + "ToolLogResponseParams", + "ToolResponseParams", + "UpdateVersionRequestParams", + "ValidationErrorLocItemParams", + "ValidationErrorParams", + "VersionDeploymentResponseFileParams", + "VersionDeploymentResponseParams", + "VersionIdParams", + "VersionIdResponseParams", + "VersionIdResponseVersionParams", + 
"VersionReferenceResponseParams", + "VersionStatsResponseEvaluatorVersionStatsItemParams", + "VersionStatsResponseParams", +] diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py new file mode 100644 index 00000000..1e72ba93 --- /dev/null +++ b/src/humanloop/requests/agent_call_response.py @@ -0,0 +1,202 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import typing_extensions +from ..types.log_status import LogStatus +from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams +from .agent_response import AgentResponseParams +from .chat_message import ChatMessageParams +from .evaluator_log_response import EvaluatorLogResponseParams +from .log_response import LogResponseParams + + +class AgentCallResponseParams(typing_extensions.TypedDict): + """ + Response model for a Agent call. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams] + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + agent: AgentResponseParams + """ + Agent that generated the Log. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. 
+ """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. + """ diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py new file mode 100644 index 00000000..906cdf4b --- /dev/null +++ b/src/humanloop/requests/agent_call_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .tool_choice import ToolChoiceParams + +AgentCallResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py new file mode 100644 index 00000000..9bc8d29c --- /dev/null +++ b/src/humanloop/requests/agent_call_stream_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from ..types.event_type import EventType +from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams + + +class AgentCallStreamResponseParams(typing_extensions.TypedDict): + """ + Response model for calling Agent in streaming mode. + """ + + log_id: str + message: str + payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams] + type: EventType + created_at: dt.datetime diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py new file mode 100644 index 00000000..876525c3 --- /dev/null +++ b/src/humanloop/requests/agent_call_stream_response_payload.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .log_response import LogResponseParams +from .log_stream_response import LogStreamResponseParams +from .tool_call import ToolCallParams + +AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams] diff --git a/src/humanloop/requests/agent_config_response.py b/src/humanloop/requests/agent_config_response.py new file mode 100644 index 00000000..c2bd9e46 --- /dev/null +++ b/src/humanloop/requests/agent_config_response.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class AgentConfigResponseParams(typing_extensions.TypedDict): + pass diff --git a/src/humanloop/requests/agent_continue_call_response.py b/src/humanloop/requests/agent_continue_call_response.py new file mode 100644 index 00000000..d30b4f39 --- /dev/null +++ b/src/humanloop/requests/agent_continue_call_response.py @@ -0,0 +1,202 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import typing_extensions +from ..types.log_status import LogStatus +from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams +from .agent_response import AgentResponseParams +from .chat_message import ChatMessageParams +from .evaluator_log_response import EvaluatorLogResponseParams +from .log_response import LogResponseParams + + +class AgentContinueCallResponseParams(typing_extensions.TypedDict): + """ + Response model for continuing an Agent call. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. 
+ """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing_extensions.NotRequired[AgentContinueCallResponseToolChoiceParams] + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + agent: AgentResponseParams + """ + Agent that generated the Log. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. 
+ """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. + """ diff --git a/src/humanloop/requests/agent_continue_call_response_tool_choice.py b/src/humanloop/requests/agent_continue_call_response_tool_choice.py new file mode 100644 index 00000000..2111fd9a --- /dev/null +++ b/src/humanloop/requests/agent_continue_call_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoiceParams + +AgentContinueCallResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_continue_call_stream_response.py b/src/humanloop/requests/agent_continue_call_stream_response.py new file mode 100644 index 00000000..bf725bb5 --- /dev/null +++ b/src/humanloop/requests/agent_continue_call_stream_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from ..types.event_type import EventType +from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams + + +class AgentContinueCallStreamResponseParams(typing_extensions.TypedDict): + """ + Response model for continuing an Agent call in streaming mode. + """ + + log_id: str + message: str + payload: typing_extensions.NotRequired[AgentContinueCallStreamResponsePayloadParams] + type: EventType + created_at: dt.datetime diff --git a/src/humanloop/requests/agent_continue_call_stream_response_payload.py b/src/humanloop/requests/agent_continue_call_stream_response_payload.py new file mode 100644 index 00000000..e176905a --- /dev/null +++ b/src/humanloop/requests/agent_continue_call_stream_response_payload.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+import typing
+
+from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
new file mode 100644
index 00000000..4d86d77e
--- /dev/null
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+from .tool_function import ToolFunctionParams
+
+
+class AgentInlineToolParams(typing_extensions.TypedDict):
+    type: typing.Literal["inline"]
+    json_schema: ToolFunctionParams
+    on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
new file mode 100644
index 00000000..8bc43e3d
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from ..types.model_providers import ModelProviders
+from ..types.template_language import TemplateLanguage
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .response_format import ResponseFormatParams
+
+
+class AgentKernelRequestParams(typing_extensions.TypedDict):
+    """
+    Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+    Contains the consistent Prompt-related fields.
+    """
+
+    model: str
+    """
+    The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+    """
+
+    endpoint: typing_extensions.NotRequired[ModelEndpoints]
+    """
+    The provider model endpoint used.
+    """
+
+    template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams]
+    """
+    The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+    For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+    For completion models, provide a prompt template as a string.
+
+    Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+    """
+
+    template_language: typing_extensions.NotRequired[TemplateLanguage]
+    """
+    The template language to use for rendering the template.
+    """
+
+    provider: typing_extensions.NotRequired[ModelProviders]
+    """
+    The company providing the underlying model service.
+    """
+
+    max_tokens: typing_extensions.NotRequired[int]
+    """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing_extensions.NotRequired[float]
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """ + + top_p: typing_extensions.NotRequired[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing_extensions.NotRequired[AgentKernelRequestStopParams] + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams] + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]] + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing_extensions.NotRequired[int] + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py new file mode 100644 index 00000000..ef446d7b --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py new file mode 100644 index 00000000..eae95d35 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py new file mode 100644 index 00000000..875dc18b --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .chat_message import ChatMessageParams + +AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py new file mode 100644 index 00000000..5ee508f8 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_tools_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_inline_tool import AgentInlineToolParams +from .agent_linked_file_request import AgentLinkedFileRequestParams + +AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py new file mode 100644 index 00000000..e8950811 --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_request.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.on_agent_call_enum import OnAgentCallEnum +from .linked_file_request import LinkedFileRequestParams + + +class AgentLinkedFileRequestParams(typing_extensions.TypedDict): + type: typing.Literal["file"] + link: LinkedFileRequestParams + on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py new file mode 100644 index 00000000..1bcc8128 --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import typing_extensions +from ..types.on_agent_call_enum import OnAgentCallEnum +from .linked_file_request import LinkedFileRequestParams + +if typing.TYPE_CHECKING: + from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams + + +class AgentLinkedFileResponseParams(typing_extensions.TypedDict): + type: typing.Literal["file"] + link: LinkedFileRequestParams + on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] + file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"] diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py new file mode 100644 index 00000000..25c71dbe --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_response_file.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from .dataset_response import DatasetResponseParams + +if typing.TYPE_CHECKING: + from .agent_response import AgentResponseParams + from .evaluator_response import EvaluatorResponseParams + from .flow_response import FlowResponseParams + from .prompt_response import PromptResponseParams + from .tool_response import ToolResponseParams +AgentLinkedFileResponseFileParams = typing.Union[ + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", +] diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py new file mode 100644 index 00000000..940f348f --- /dev/null +++ b/src/humanloop/requests/agent_log_response.py @@ -0,0 +1,201 @@ +# This file was auto-generated by Fern from our API Definition. 
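+#
+# NOTE: illustrative sketch, not part of the generated definition. The two tool
+# forms an Agent version can declare (AgentKernelRequestToolsItemParams, defined
+# in the preceding files): an inline tool carrying a JSON schema, or a link to
+# another Humanloop File. The json_schema fields follow the OpenAI-style
+# function shape and are an assumption here; the link payload is elided:
+#
+#   inline_tool = {"type": "inline", "json_schema": {"name": "get_weather", "parameters": {}}}
+#   linked_tool = {"type": "file", "link": {...}}  # see LinkedFileRequestParams
+#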
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+from .chat_message import ChatMessageParams
+
+if typing.TYPE_CHECKING:
+    from .evaluator_log_response import EvaluatorLogResponseParams
+    from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+    """
+    General request for creating a Log
+    """
+
+    output_message: typing_extensions.NotRequired[ChatMessageParams]
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the prompt.
+    """
+
+    output_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the output.
+    """
+
+    finish_reason: typing_extensions.NotRequired[str]
+    """
+    Reason the generation finished.
+    """
+
+    messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    agent: AgentResponseParams
+    """
+    Agent that generated the Log.
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    output: typing_extensions.NotRequired[str]
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw response received from the provider.
+ """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] + """ + Logs nested under this Log in the Trace. + """ diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py new file mode 100644 index 00000000..30ebcb72 --- /dev/null +++ b/src/humanloop/requests/agent_log_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoiceParams + +AgentLogResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py new file mode 100644 index 00000000..cd35485e --- /dev/null +++ b/src/humanloop/requests/agent_log_stream_response.py @@ -0,0 +1,87 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt + +import typing_extensions +from .chat_message import ChatMessageParams + + +class AgentLogStreamResponseParams(typing_extensions.TypedDict): + """ + Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + id: str + """ + ID of the log. + """ + + agent_id: str + """ + ID of the Agent the log belongs to. + """ + + version_id: str + """ + ID of the specific version of the Agent. + """ diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py new file mode 100644 index 00000000..10f47b80 --- /dev/null +++ b/src/humanloop/requests/agent_response.py @@ -0,0 +1,242 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.model_endpoints import ModelEndpoints +from ..types.model_providers import ModelProviders +from ..types.template_language import TemplateLanguage +from ..types.user_response import UserResponse +from ..types.version_status import VersionStatus +from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams +from .agent_response_stop import AgentResponseStopParams +from .agent_response_template import AgentResponseTemplateParams +from .environment_response import EnvironmentResponseParams +from .evaluator_aggregate import EvaluatorAggregateParams +from .input_response import InputResponseParams +from .response_format import ResponseFormatParams + +if typing.TYPE_CHECKING: + from .agent_response_tools_item import AgentResponseToolsItemParams + from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams + + +class AgentResponseParams(typing_extensions.TypedDict): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. 
+ """ + + path: str + """ + Path of the Agent, including the name, which is used as a unique identifier. + """ + + id: str + """ + Unique identifier for the Agent. + """ + + directory_id: typing_extensions.NotRequired[str] + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing_extensions.NotRequired[ModelEndpoints] + """ + The provider model endpoint used. + """ + + template: typing_extensions.NotRequired[AgentResponseTemplateParams] + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing_extensions.NotRequired[TemplateLanguage] + """ + The template language to use for rendering the template. + """ + + provider: typing_extensions.NotRequired[ModelProviders] + """ + The company providing the underlying model service. + """ + + max_tokens: typing_extensions.NotRequired[int] + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + """ + + temperature: typing_extensions.NotRequired[float] + """ + What sampling temperature to use when making a generation. Higher values means the model will be more creative. + """ + + top_p: typing_extensions.NotRequired[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing_extensions.NotRequired[AgentResponseStopParams] + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams] + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing.Sequence["AgentResponseToolsItemParams"] + """ + List of tools that the Agent can call. 
These can be linked files or inline tools. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing_extensions.NotRequired[int] + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Agent version. Version names must be unique for a given Agent. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Agent. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + name: str + """ + Name of the Agent. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + + version_id: str + """ + Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. + """ + + type: typing_extensions.NotRequired[typing.Literal["agent"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Agent Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Agent. + """ + + committed_by: typing_extensions.NotRequired[UserResponse] + """ + The user who committed the Agent Version. + """ + + committed_at: typing_extensions.NotRequired[dt.datetime] + """ + The date and time the Agent Version was committed. + """ + + status: VersionStatus + """ + The status of the Agent Version. + """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Agent Version + """ + + total_logs_count: int + """ + The number of logs that have been generated across all Agent Versions + """ + + inputs: typing.Sequence[InputResponseParams] + """ + Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] + """ + Evaluators that have been attached to this Agent that are used for monitoring logs. + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Agent Version. + """ + + raw_file_content: typing_extensions.NotRequired[str] + """ + The raw content of the Agent. Corresponds to the .agent file. + """ diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py new file mode 100644 index 00000000..a32f2ecf --- /dev/null +++ b/src/humanloop/requests/agent_response_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
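+#
+# NOTE: illustrative sketch, not part of the generated definition. Per the
+# `reasoning_effort` docstrings above, the union defined below takes either an
+# OpenAiReasoningEffort enum value (for OpenAI reasoning models; "medium" is an
+# assumed member) or an integer token budget (for Anthropic reasoning models):
+#
+#   openai_effort = "medium"      # assumed OpenAiReasoningEffort value
+#   anthropic_budget = 4096       # maximum reasoning token budget
+#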
+ +import typing + +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py new file mode 100644 index 00000000..a395ee73 --- /dev/null +++ b/src/humanloop/requests/agent_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py new file mode 100644 index 00000000..3998be1b --- /dev/null +++ b/src/humanloop/requests/agent_response_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessageParams + +AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py new file mode 100644 index 00000000..87e1e036 --- /dev/null +++ b/src/humanloop/requests/agent_response_tools_item.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from .agent_inline_tool import AgentInlineToolParams + +if typing.TYPE_CHECKING: + from .agent_linked_file_response import AgentLinkedFileResponseParams +AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams] diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py new file mode 100644 index 00000000..b71f614e --- /dev/null +++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict): + type: typing.Literal["redacted_thinking"] + data: str + """ + Thinking block Anthropic redacted for safety reasons. User is expected to pass the block back to Anthropic + """ diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py new file mode 100644 index 00000000..23fdffb6 --- /dev/null +++ b/src/humanloop/requests/anthropic_thinking_content.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class AnthropicThinkingContentParams(typing_extensions.TypedDict): + type: typing.Literal["thinking"] + thinking: str + """ + Model's chain-of-thought for providing the response. + """ + + signature: str + """ + Cryptographic signature that verifies the thinking block was generated by Anthropic. + """ diff --git a/src/humanloop/requests/boolean_evaluator_stats_response.py b/src/humanloop/requests/boolean_evaluator_stats_response.py new file mode 100644 index 00000000..18618f40 --- /dev/null +++ b/src/humanloop/requests/boolean_evaluator_stats_response.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class BooleanEvaluatorStatsResponseParams(typing_extensions.TypedDict): + """ + Base attributes for stats for an Evaluator Version-Evaluated Version pair + in the Evaluation. 
+ """ + + evaluator_version_id: str + """ + Unique identifier for the Evaluator Version. + """ + + total_logs: int + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int + """ + The total number of null judgments (i.e. abstentions) for this Evaluator Version. + """ + + num_errors: int + """ + The total number of errored Evaluators for this Evaluator Version. + """ + + num_true: int + """ + The total number of `True` judgments for this Evaluator Version. + """ + + num_false: int + """ + The total number of `False` judgments for this Evaluator Version. + """ diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py new file mode 100644 index 00000000..eeb6c7cd --- /dev/null +++ b/src/humanloop/requests/chat_message.py @@ -0,0 +1,41 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.chat_role import ChatRole +from .chat_message_content import ChatMessageContentParams +from .chat_message_thinking_item import ChatMessageThinkingItemParams +from .tool_call import ToolCallParams + + +class ChatMessageParams(typing_extensions.TypedDict): + content: typing_extensions.NotRequired[ChatMessageContentParams] + """ + The content of the message. + """ + + name: typing_extensions.NotRequired[str] + """ + Optional name of the message author. + """ + + tool_call_id: typing_extensions.NotRequired[str] + """ + Tool call that this message is responding to. + """ + + role: ChatRole + """ + Role of the message author. + """ + + tool_calls: typing_extensions.NotRequired[typing.Sequence[ToolCallParams]] + """ + A list of tool calls requested by the assistant. + """ + + thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]] + """ + Model's chain-of-thought for providing the response. Present on assistant messages if model supports it. + """ diff --git a/src/humanloop/requests/chat_message_content.py b/src/humanloop/requests/chat_message_content.py new file mode 100644 index 00000000..ea04974e --- /dev/null +++ b/src/humanloop/requests/chat_message_content.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message_content_item import ChatMessageContentItemParams + +ChatMessageContentParams = typing.Union[str, typing.Sequence[ChatMessageContentItemParams]] diff --git a/src/humanloop/requests/chat_message_content_item.py b/src/humanloop/requests/chat_message_content_item.py new file mode 100644 index 00000000..c4a24ea7 --- /dev/null +++ b/src/humanloop/requests/chat_message_content_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .image_chat_content import ImageChatContentParams +from .text_chat_content import TextChatContentParams + +ChatMessageContentItemParams = typing.Union[TextChatContentParams, ImageChatContentParams] diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py new file mode 100644 index 00000000..0c54d371 --- /dev/null +++ b/src/humanloop/requests/chat_message_thinking_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+import typing
+
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
+
+ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
diff --git a/src/humanloop/requests/code_evaluator_request.py b/src/humanloop/requests/code_evaluator_request.py
new file mode 100644
index 00000000..914d8f46
--- /dev/null
+++ b/src/humanloop/requests/code_evaluator_request.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.evaluator_arguments_type import EvaluatorArgumentsType
+from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum
+from ..types.valence import Valence
+from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
+
+
+class CodeEvaluatorRequestParams(typing_extensions.TypedDict):
+    arguments_type: EvaluatorArgumentsType
+    """
+    Whether this Evaluator is target-free or target-required.
+    """
+
+    return_type: EvaluatorReturnTypeEnum
+    """
+    The type of the return value of the Evaluator.
+    """
+
+    attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used.
+    """
+
+    options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]]
+    """
+    The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'.
+    """
+
+    number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams]
+    """
+    Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'.
+    """
+
+    number_valence: typing_extensions.NotRequired[Valence]
+    """
+    The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better.
+    """
+
+    evaluator_type: typing.Literal["python"]
+    code: str
+    """
+    The code for the Evaluator. This code will be executed in a sandboxed environment.
+    """
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
new file mode 100644
index 00000000..f68f2e96
--- /dev/null
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.log_status import LogStatus
+
+
+class CreateAgentLogResponseParams(typing_extensions.TypedDict):
+    """
+    Response for an Agent Log.
+    """
+
+    id: str
+    """
+    Unique identifier for the Log.
+    """
+
+    agent_id: str
+    """
+    Unique identifier for the Agent.
+    """
+
+    version_id: str
+    """
+    Unique identifier for the Agent Version.
+    """
+
+    log_status: typing_extensions.NotRequired[LogStatus]
+    """
+    Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """ diff --git a/src/humanloop/requests/create_datapoint_request.py b/src/humanloop/requests/create_datapoint_request.py new file mode 100644 index 00000000..10ada080 --- /dev/null +++ b/src/humanloop/requests/create_datapoint_request.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .chat_message import ChatMessageParams +from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams + + +class CreateDatapointRequestParams(typing_extensions.TypedDict): + inputs: typing_extensions.NotRequired[typing.Dict[str, str]] + """ + The inputs to the prompt template. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + List of chat messages to provide to the model. + """ + + target: typing_extensions.NotRequired[typing.Dict[str, CreateDatapointRequestTargetValueParams]] + """ + Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation. + """ diff --git a/src/humanloop/requests/create_datapoint_request_target_value.py b/src/humanloop/requests/create_datapoint_request_target_value.py new file mode 100644 index 00000000..ff6ed57f --- /dev/null +++ b/src/humanloop/requests/create_datapoint_request_target_value.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateDatapointRequestTargetValueParams = typing.Union[ + str, int, float, bool, typing.Sequence[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]] +] diff --git a/src/humanloop/requests/create_evaluator_log_response.py b/src/humanloop/requests/create_evaluator_log_response.py new file mode 100644 index 00000000..29fbcdc5 --- /dev/null +++ b/src/humanloop/requests/create_evaluator_log_response.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class CreateEvaluatorLogResponseParams(typing_extensions.TypedDict): + id: str + """ + String identifier of the new Log. + """ + + parent_id: str + """ + Identifier of the evaluated parent Log. + """ + + session_id: typing_extensions.NotRequired[str] + """ + Identifier of the Session containing both the parent and the new child Log. If the parent Log does not belong to a Session, a new Session is created with this ID. + """ + + version_id: str + """ + Identifier of Evaluator Version for which the Log was registered. + """ diff --git a/src/humanloop/requests/create_flow_log_response.py b/src/humanloop/requests/create_flow_log_response.py new file mode 100644 index 00000000..6f490ba3 --- /dev/null +++ b/src/humanloop/requests/create_flow_log_response.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +from ..types.log_status import LogStatus + + +class CreateFlowLogResponseParams(typing_extensions.TypedDict): + """ + Response for a Flow Log. + """ + + id: str + """ + Unique identifier for the Log. + """ + + flow_id: str + """ + Unique identifier for the Flow. + """ + + version_id: str + """ + Unique identifier for the Flow Version. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. 
+ """ diff --git a/src/humanloop/requests/create_prompt_log_response.py b/src/humanloop/requests/create_prompt_log_response.py new file mode 100644 index 00000000..8a0b39d3 --- /dev/null +++ b/src/humanloop/requests/create_prompt_log_response.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class CreatePromptLogResponseParams(typing_extensions.TypedDict): + id: str + """ + String ID of log. + """ + + prompt_id: str + """ + ID of the Prompt the log belongs to. + """ + + version_id: str + """ + ID of the specific version of the Prompt. + """ + + session_id: typing_extensions.NotRequired[str] + """ + String ID of session the log belongs to. + """ diff --git a/src/humanloop/requests/create_tool_log_response.py b/src/humanloop/requests/create_tool_log_response.py new file mode 100644 index 00000000..9b898fba --- /dev/null +++ b/src/humanloop/requests/create_tool_log_response.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class CreateToolLogResponseParams(typing_extensions.TypedDict): + id: str + """ + String ID of log. + """ + + tool_id: str + """ + ID of the Tool the log belongs to. + """ + + version_id: str + """ + ID of the specific version of the Tool. + """ + + session_id: typing_extensions.NotRequired[str] + """ + String ID of session the log belongs to. + """ diff --git a/src/humanloop/requests/dashboard_configuration.py b/src/humanloop/requests/dashboard_configuration.py new file mode 100644 index 00000000..b123ac78 --- /dev/null +++ b/src/humanloop/requests/dashboard_configuration.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.time_unit import TimeUnit + + +class DashboardConfigurationParams(typing_extensions.TypedDict): + time_unit: TimeUnit + time_range_days: int + model_config_ids: typing.Sequence[str] diff --git a/src/humanloop/requests/datapoint_response.py b/src/humanloop/requests/datapoint_response.py new file mode 100644 index 00000000..ba1928e9 --- /dev/null +++ b/src/humanloop/requests/datapoint_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .chat_message import ChatMessageParams +from .datapoint_response_target_value import DatapointResponseTargetValueParams + + +class DatapointResponseParams(typing_extensions.TypedDict): + inputs: typing_extensions.NotRequired[typing.Dict[str, str]] + """ + The inputs to the prompt template. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + List of chat messages to provide to the model. + """ + + target: typing_extensions.NotRequired[typing.Dict[str, DatapointResponseTargetValueParams]] + """ + Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation. + """ + + id: str + """ + Unique identifier for the Datapoint. Starts with `dp_`. + """ diff --git a/src/humanloop/requests/datapoint_response_target_value.py b/src/humanloop/requests/datapoint_response_target_value.py new file mode 100644 index 00000000..43cbdaa7 --- /dev/null +++ b/src/humanloop/requests/datapoint_response_target_value.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DatapointResponseTargetValueParams = typing.Union[ + str, int, float, bool, typing.Sequence[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]] +] diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py new file mode 100644 index 00000000..aa0119e9 --- /dev/null +++ b/src/humanloop/requests/dataset_response.py @@ -0,0 +1,102 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import typing_extensions +from ..types.user_response import UserResponse +from .datapoint_response import DatapointResponseParams +from .environment_response import EnvironmentResponseParams + + +class DatasetResponseParams(typing_extensions.TypedDict): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str + """ + Path of the Dataset, including the name, which is used as a unique identifier. + """ + + id: str + """ + Unique identifier for the Dataset. Starts with `ds_`. + """ + + directory_id: typing_extensions.NotRequired[str] + """ + ID of the directory that the file is in on Humanloop. + """ + + name: str + """ + Name of the Dataset, which is used as a unique identifier. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Dataset. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + version_id: str + """ + Unique identifier for the specific Dataset Version. If no query params provided, the default deployed Dataset Version is returned. Starts with `dsv_`. + """ + + type: typing_extensions.NotRequired[typing.Literal["dataset"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Dataset Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Dataset. + """ + + last_used_at: dt.datetime + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Dataset version. Version names must be unique for a given Dataset. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. + """ + + datapoints_count: int + """ + The number of Datapoints in this Dataset version. + """ + + datapoints: typing_extensions.NotRequired[typing.Sequence[DatapointResponseParams]] + """ + The list of Datapoints in this Dataset version. Only provided if explicitly requested. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. 
+ """ diff --git a/src/humanloop/requests/directory_response.py b/src/humanloop/requests/directory_response.py new file mode 100644 index 00000000..4dc4a7d5 --- /dev/null +++ b/src/humanloop/requests/directory_response.py @@ -0,0 +1,46 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import typing_extensions + + +class DirectoryResponseParams(typing_extensions.TypedDict): + id: str + """ + String ID of directory. Starts with `dir_`. + """ + + parent_id: typing_extensions.NotRequired[str] + """ + ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`. + """ + + name: str + """ + Name of the directory. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the directory. + """ + + path: str + """ + Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the directory. + """ + + tags: typing.Sequence[str] + """ + List of tags associated with the directory. + """ + + created_at: dt.datetime + updated_at: dt.datetime diff --git a/src/humanloop/requests/directory_with_parents_and_children_response.py b/src/humanloop/requests/directory_with_parents_and_children_response.py new file mode 100644 index 00000000..27af28b6 --- /dev/null +++ b/src/humanloop/requests/directory_with_parents_and_children_response.py @@ -0,0 +1,64 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import typing_extensions +from .directory_response import DirectoryResponseParams +from .directory_with_parents_and_children_response_files_item import ( + DirectoryWithParentsAndChildrenResponseFilesItemParams, +) + + +class DirectoryWithParentsAndChildrenResponseParams(typing_extensions.TypedDict): + id: str + """ + String ID of directory. Starts with `dir_`. + """ + + parent_id: typing_extensions.NotRequired[str] + """ + ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`. + """ + + name: str + """ + Name of the directory. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the directory. + """ + + path: str + """ + Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the directory. + """ + + tags: typing.Sequence[str] + """ + List of tags associated with the directory. + """ + + created_at: dt.datetime + updated_at: dt.datetime + subdirectories: typing.Sequence[DirectoryResponseParams] + """ + List of subdirectories in the directory. + """ + + files: typing.Sequence[DirectoryWithParentsAndChildrenResponseFilesItemParams] + """ + List of files in the directory. + """ + + parents: typing.Sequence[DirectoryResponseParams] + """ + List of parent directories of the directory. + """ diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py new file mode 100644 index 00000000..1ebe44fc --- /dev/null +++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .agent_response import AgentResponseParams +from .dataset_response import DatasetResponseParams +from .evaluator_response import EvaluatorResponseParams +from .flow_response import FlowResponseParams +from .prompt_response import PromptResponseParams +from .tool_response import ToolResponseParams + +DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[ + PromptResponseParams, + ToolResponseParams, + EvaluatorResponseParams, + DatasetResponseParams, + FlowResponseParams, + AgentResponseParams, +] diff --git a/src/humanloop/requests/environment_response.py b/src/humanloop/requests/environment_response.py new file mode 100644 index 00000000..0c74481e --- /dev/null +++ b/src/humanloop/requests/environment_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from ..types.environment_tag import EnvironmentTag + + +class EnvironmentResponseParams(typing_extensions.TypedDict): + id: str + created_at: dt.datetime + name: str + tag: EnvironmentTag diff --git a/src/humanloop/requests/evaluatee_request.py b/src/humanloop/requests/evaluatee_request.py new file mode 100644 index 00000000..d7544be1 --- /dev/null +++ b/src/humanloop/requests/evaluatee_request.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class EvaluateeRequestParams(typing_extensions.TypedDict): + """ + Specification of a File version on Humanloop. + + This can be done in a couple of ways: + - Specifying `version_id` directly. + - Specifying a File (and optionally an Environment). + - A File can be specified by either `path` or `file_id`. + - An Environment can be specified by `environment_id`. If no Environment is specified, the default Environment is used. + """ + + version_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the File Version. If provided, none of the other fields should be specified. + """ + + path: typing_extensions.NotRequired[str] + """ + Path identifying a File. Provide either this or `file_id` if you want to specify a File. + """ + + file_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the File. Provide either this or `path` if you want to specify a File. + """ + + environment: typing_extensions.NotRequired[str] + """ + Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used. + """ + + batch_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + orchestrated: typing_extensions.NotRequired[bool] + """ + Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + """ diff --git a/src/humanloop/requests/evaluatee_response.py b/src/humanloop/requests/evaluatee_response.py new file mode 100644 index 00000000..fb860a37 --- /dev/null +++ b/src/humanloop/requests/evaluatee_response.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from .run_version_response import RunVersionResponseParams + + +class EvaluateeResponseParams(typing_extensions.TypedDict): + """ + Version of the Evaluatee being evaluated. 
+ """ + + version: typing_extensions.NotRequired[RunVersionResponseParams] + batch_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + orchestrated: bool + """ + Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + """ + + pinned: bool + """ + Pinned Evaluatees are shown in Humanloop's Overview, allowing you to use them as baselines for comparison. + """ + + added_at: typing_extensions.NotRequired[dt.datetime] + """ + When the Evaluatee was added to the Evaluation. + """ diff --git a/src/humanloop/requests/evaluation_evaluator_response.py b/src/humanloop/requests/evaluation_evaluator_response.py new file mode 100644 index 00000000..3d40ba33 --- /dev/null +++ b/src/humanloop/requests/evaluation_evaluator_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from .evaluator_response import EvaluatorResponseParams + + +class EvaluationEvaluatorResponseParams(typing_extensions.TypedDict): + version: EvaluatorResponseParams + orchestrated: bool + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. + """ + + added_at: dt.datetime + """ + When the Evaluator was added to the Evaluation. + """ diff --git a/src/humanloop/requests/evaluation_log_response.py b/src/humanloop/requests/evaluation_log_response.py new file mode 100644 index 00000000..5bbd0649 --- /dev/null +++ b/src/humanloop/requests/evaluation_log_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .datapoint_response import DatapointResponseParams +from .log_response import LogResponseParams + + +class EvaluationLogResponseParams(typing_extensions.TypedDict): + run_id: str + """ + Unique identifier for the Run. + """ + + datapoint: typing_extensions.NotRequired[DatapointResponseParams] + """ + The Datapoint used to generate the Log + """ + + log: LogResponseParams + """ + The Log that was evaluated by the Evaluator. + """ + + evaluator_logs: typing.Sequence[LogResponseParams] + """ + The Evaluator Logs containing the judgments for the Log. + """ diff --git a/src/humanloop/requests/evaluation_response.py b/src/humanloop/requests/evaluation_response.py new file mode 100644 index 00000000..4c077927 --- /dev/null +++ b/src/humanloop/requests/evaluation_response.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import typing_extensions +from ..types.user_response import UserResponse +from .evaluation_evaluator_response import EvaluationEvaluatorResponseParams + + +class EvaluationResponseParams(typing_extensions.TypedDict): + id: str + """ + Unique identifier for the Evaluation. Starts with `evr`. + """ + + runs_count: int + """ + The total number of Runs in the Evaluation. + """ + + evaluators: typing.Sequence[EvaluationEvaluatorResponseParams] + """ + The Evaluator Versions used to evaluate. + """ + + name: typing_extensions.NotRequired[str] + """ + Name of the Evaluation to help identify it. Must be unique among Evaluations associated with File. + """ + + file_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the File associated with the Evaluation. 
+ """ + + created_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + updated_at: dt.datetime + url: typing_extensions.NotRequired[str] + """ + URL to view the Evaluation on the Humanloop. + """ diff --git a/src/humanloop/requests/evaluation_run_response.py b/src/humanloop/requests/evaluation_run_response.py new file mode 100644 index 00000000..5dd7c782 --- /dev/null +++ b/src/humanloop/requests/evaluation_run_response.py @@ -0,0 +1,56 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from ..types.evaluation_status import EvaluationStatus +from ..types.user_response import UserResponse +from .dataset_response import DatasetResponseParams +from .run_version_response import RunVersionResponseParams + + +class EvaluationRunResponseParams(typing_extensions.TypedDict): + id: str + """ + Unique identifier for the Run. + """ + + dataset: typing_extensions.NotRequired[DatasetResponseParams] + """ + The Dataset used in the Run. + """ + + version: typing_extensions.NotRequired[RunVersionResponseParams] + """ + The version used in the Run. + """ + + orchestrated: bool + """ + Whether the Run is orchestrated by Humanloop. + """ + + added_at: dt.datetime + """ + When the Run was added to the Evaluation. + """ + + created_at: dt.datetime + """ + When the Run was created. + """ + + created_by: typing_extensions.NotRequired[UserResponse] + """ + The User who created the Run. + """ + + status: EvaluationStatus + """ + The status of the Run. + """ + + control: bool + """ + Stats for other Runs will be displayed in comparison to the control Run. + """ diff --git a/src/humanloop/requests/evaluation_runs_response.py b/src/humanloop/requests/evaluation_runs_response.py new file mode 100644 index 00000000..fd3d4792 --- /dev/null +++ b/src/humanloop/requests/evaluation_runs_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .evaluation_run_response import EvaluationRunResponseParams + + +class EvaluationRunsResponseParams(typing_extensions.TypedDict): + runs: typing.Sequence[EvaluationRunResponseParams] + """ + The Runs in the Evaluation. + """ diff --git a/src/humanloop/requests/evaluation_stats.py b/src/humanloop/requests/evaluation_stats.py new file mode 100644 index 00000000..edd56e15 --- /dev/null +++ b/src/humanloop/requests/evaluation_stats.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.evaluation_status import EvaluationStatus +from .run_stats_response import RunStatsResponseParams + + +class EvaluationStatsParams(typing_extensions.TypedDict): + run_stats: typing.Sequence[RunStatsResponseParams] + """ + Stats for each Run in the Evaluation. + """ + + progress: typing_extensions.NotRequired[str] + """ + A summary string report of the Evaluation's progress you can print to the command line;helpful when integrating Evaluations with CI/CD. + """ + + report: typing_extensions.NotRequired[str] + """ + A summary string report of the Evaluation you can print to command line;helpful when integrating Evaluations with CI/CD. + """ + + status: EvaluationStatus + """ + The current status of the Evaluation. 
+ """ diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request.py b/src/humanloop/requests/evaluator_activation_deactivation_request.py new file mode 100644 index 00000000..b3f3f91d --- /dev/null +++ b/src/humanloop/requests/evaluator_activation_deactivation_request.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from .evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) + + +class EvaluatorActivationDeactivationRequestParams(typing_extensions.TypedDict): + activate: typing_extensions.NotRequired[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + """ + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + """ + + deactivate: typing_extensions.NotRequired[ + typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] + ] + """ + Evaluators to deactivate. These will not be run on new Logs. + """ diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py b/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py new file mode 100644 index 00000000..049c4cc8 --- /dev/null +++ b/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams +from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams + +EvaluatorActivationDeactivationRequestActivateItemParams = typing.Union[ + MonitoringEvaluatorVersionRequestParams, MonitoringEvaluatorEnvironmentRequestParams +] diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py b/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py new file mode 100644 index 00000000..4a21dcaf --- /dev/null +++ b/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams +from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams + +EvaluatorActivationDeactivationRequestDeactivateItemParams = typing.Union[ + MonitoringEvaluatorVersionRequestParams, MonitoringEvaluatorEnvironmentRequestParams +] diff --git a/src/humanloop/requests/evaluator_aggregate.py b/src/humanloop/requests/evaluator_aggregate.py new file mode 100644 index 00000000..f8840d4f --- /dev/null +++ b/src/humanloop/requests/evaluator_aggregate.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions + + +class EvaluatorAggregateParams(typing_extensions.TypedDict): + value: float + """ + The aggregated value of the evaluator. + """ + + evaluator_id: str + """ + ID of the evaluator. + """ + + evaluator_version_id: str + """ + ID of the evaluator version. 
+ """ + + created_at: dt.datetime + updated_at: dt.datetime diff --git a/src/humanloop/requests/evaluator_config_response.py b/src/humanloop/requests/evaluator_config_response.py new file mode 100644 index 00000000..de75afcf --- /dev/null +++ b/src/humanloop/requests/evaluator_config_response.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class EvaluatorConfigResponseParams(typing_extensions.TypedDict): + pass diff --git a/src/humanloop/requests/evaluator_file_id.py b/src/humanloop/requests/evaluator_file_id.py new file mode 100644 index 00000000..952eda84 --- /dev/null +++ b/src/humanloop/requests/evaluator_file_id.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class EvaluatorFileIdParams(typing_extensions.TypedDict): + """ + Base model for specifying an Evaluator for an Evaluation. + """ + + environment: typing_extensions.NotRequired[str] + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + id: str + """ + Unique identifier for the File. + """ + + orchestrated: typing_extensions.NotRequired[bool] + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. + """ diff --git a/src/humanloop/requests/evaluator_file_path.py b/src/humanloop/requests/evaluator_file_path.py new file mode 100644 index 00000000..0b1a06c9 --- /dev/null +++ b/src/humanloop/requests/evaluator_file_path.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class EvaluatorFilePathParams(typing_extensions.TypedDict): + """ + Base model for specifying an Evaluator for an Evaluation. + """ + + environment: typing_extensions.NotRequired[str] + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + path: str + """ + Path identifying a File. Provide this to specify a File. + """ + + orchestrated: typing_extensions.NotRequired[bool] + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. + """ diff --git a/src/humanloop/requests/evaluator_judgment_number_limit.py b/src/humanloop/requests/evaluator_judgment_number_limit.py new file mode 100644 index 00000000..3cdd87db --- /dev/null +++ b/src/humanloop/requests/evaluator_judgment_number_limit.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class EvaluatorJudgmentNumberLimitParams(typing_extensions.TypedDict): + min: typing_extensions.NotRequired[float] + """ + The minimum value that can be selected. + """ + + max: typing_extensions.NotRequired[float] + """ + The maximum value that can be selected. + """ + + step: typing_extensions.NotRequired[float] + """ + The step size for the number input. + """ diff --git a/src/humanloop/requests/evaluator_judgment_option_response.py b/src/humanloop/requests/evaluator_judgment_option_response.py new file mode 100644 index 00000000..77724406 --- /dev/null +++ b/src/humanloop/requests/evaluator_judgment_option_response.py @@ -0,0 +1,16 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+import typing_extensions
+from ..types.valence import Valence
+
+
+class EvaluatorJudgmentOptionResponseParams(typing_extensions.TypedDict):
+    name: str
+    """
+    The name of the option.
+    """
+
+    valence: typing_extensions.NotRequired[Valence]
+    """
+    Whether this option should be considered positive or negative.
+    """
diff --git a/src/humanloop/requests/evaluator_log_response.py b/src/humanloop/requests/evaluator_log_response.py
new file mode 100644
index 00000000..c434280e
--- /dev/null
+++ b/src/humanloop/requests/evaluator_log_response.py
@@ -0,0 +1,176 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
+from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams
+from .evaluator_response import EvaluatorResponseParams
+
+if typing.TYPE_CHECKING:
+    from .log_response import LogResponseParams
+
+
+class EvaluatorLogResponseParams(typing_extensions.TypedDict):
+    """
+    General request for creating a Log
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    output: typing_extensions.NotRequired[str]
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing_extensions.NotRequired[str]
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing_extensions.NotRequired[LogStatus]
+    """
+    Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+    """
+
+    parent_id: typing_extensions.NotRequired[str]
+    """
+    Identifier of the evaluated Log. The newly created Log will have this one set as parent.
+    """
+
+    source_datapoint_id: typing_extensions.NotRequired[str]
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing_extensions.NotRequired[str]
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing_extensions.NotRequired[typing.Sequence[str]]
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+    """
+
+    user: typing_extensions.NotRequired[str]
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing_extensions.NotRequired[str]
+    """
+    The name of the Environment the Log is associated to.
+    """
+
+    save: typing_extensions.NotRequired[bool]
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing_extensions.NotRequired[str]
+    """
+    This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+    """
+
+    output_message: typing_extensions.NotRequired[ChatMessageParams]
+    """
+    The message returned by the LLM. Only populated for LLM Evaluator Logs.
+    """
+
+    judgment: typing_extensions.NotRequired[EvaluatorLogResponseJudgmentParams]
+    """
+    Evaluator assessment of the Log.
+    """
+
+    marked_completed: typing_extensions.NotRequired[bool]
+    """
+    Whether the Log has been manually marked as completed by a user.
+    """
+
+    id: str
+    """
+    Unique identifier for the Log.
+    """
+
+    evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+    """
+    List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+    """
+
+    trace_flow_id: typing_extensions.NotRequired[str]
+    """
+    Identifier for the Flow that the Trace belongs to.
+    """
+
+    trace_id: typing_extensions.NotRequired[str]
+    """
+    Identifier for the Trace that the Log belongs to.
+    """
+
+    trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+    """
+    Logs nested under this Log in the Trace.
+    """
+
+    evaluator: EvaluatorResponseParams
+    """
+    Evaluator used to generate the judgment.
+    """
+
+    parent: typing_extensions.NotRequired["LogResponseParams"]
+    """
+    The Log that was evaluated. Only provided if the ?include_parent query parameter is set for the request.
+    """
diff --git a/src/humanloop/requests/evaluator_log_response_judgment.py b/src/humanloop/requests/evaluator_log_response_judgment.py
new file mode 100644
index 00000000..8958f7d9
--- /dev/null
+++ b/src/humanloop/requests/evaluator_log_response_judgment.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EvaluatorLogResponseJudgmentParams = typing.Union[bool, str, typing.Sequence[str], float]
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
new file mode 100644
index 00000000..38093ae5
--- /dev/null
+++ b/src/humanloop/requests/evaluator_response.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
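+
+# A sketch of consuming EvaluatorLogResponseJudgmentParams (the union defined just
+# above): a judgment may be a bool, a string, a sequence of strings (multi-select)
+# or a float, so callers typically branch on the runtime type. Illustrative only.
+#
+#   def render_judgment(judgment: EvaluatorLogResponseJudgmentParams) -> str:
+#       if isinstance(judgment, bool):
+#           return "pass" if judgment else "fail"
+#       if isinstance(judgment, (list, tuple)):
+#           return ", ".join(judgment)
+#       return str(judgment)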
+ +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.user_response import UserResponse +from .environment_response import EnvironmentResponseParams +from .evaluator_aggregate import EvaluatorAggregateParams +from .evaluator_response_spec import EvaluatorResponseSpecParams +from .input_response import InputResponseParams + +if typing.TYPE_CHECKING: + from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams + + +class EvaluatorResponseParams(typing_extensions.TypedDict): + """ + Version of the Evaluator used to provide judgments. + """ + + path: str + """ + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + """ + + id: str + """ + Unique identifier for the Evaluator. + """ + + directory_id: typing_extensions.NotRequired[str] + """ + ID of the directory that the file is in on Humanloop. + """ + + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. + """ + + spec: EvaluatorResponseSpecParams + name: str + """ + Name of the Evaluator, which is used as a unique identifier. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Evaluator. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + version_id: str + """ + Unique identifier for the specific Evaluator Version. If no query params provided, the default deployed Evaluator Version is returned. + """ + + type: typing_extensions.NotRequired[typing.Literal["evaluator"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Evaluator Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Evaluator. + """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Evaluator Version + """ + + total_logs_count: int + """ + The number of logs that have been generated across all Evaluator Versions + """ + + inputs: typing.Sequence[InputResponseParams] + """ + Inputs associated to the Evaluator. Inputs correspond to any of the variables used within the Evaluator template. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] + """ + Evaluators that have been attached to this Evaluator that are used for monitoring logs. + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Evaluator Version. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. 
+ """ diff --git a/src/humanloop/requests/evaluator_response_spec.py b/src/humanloop/requests/evaluator_response_spec.py new file mode 100644 index 00000000..72cf3d82 --- /dev/null +++ b/src/humanloop/requests/evaluator_response_spec.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .code_evaluator_request import CodeEvaluatorRequestParams +from .external_evaluator_request import ExternalEvaluatorRequestParams +from .human_evaluator_request import HumanEvaluatorRequestParams +from .llm_evaluator_request import LlmEvaluatorRequestParams + +EvaluatorResponseSpecParams = typing.Union[ + LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams +] diff --git a/src/humanloop/requests/evaluator_version_id.py b/src/humanloop/requests/evaluator_version_id.py new file mode 100644 index 00000000..94700595 --- /dev/null +++ b/src/humanloop/requests/evaluator_version_id.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class EvaluatorVersionIdParams(typing_extensions.TypedDict): + """ + Base model for specifying an Evaluator for an Evaluation. + """ + + version_id: str + """ + Unique identifier for the Version. + """ + + orchestrated: typing_extensions.NotRequired[bool] + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. + """ diff --git a/src/humanloop/requests/external_evaluator_request.py b/src/humanloop/requests/external_evaluator_request.py new file mode 100644 index 00000000..6e77103f --- /dev/null +++ b/src/humanloop/requests/external_evaluator_request.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.evaluator_arguments_type import EvaluatorArgumentsType +from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum +from ..types.valence import Valence +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams + + +class ExternalEvaluatorRequestParams(typing_extensions.TypedDict): + arguments_type: EvaluatorArgumentsType + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: EvaluatorReturnTypeEnum + """ + The type of the return value of the Evaluator. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] + """ + The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. + """ + + number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing_extensions.NotRequired[Valence] + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. 
+ """ + + evaluator_type: typing.Literal["external"] diff --git a/src/humanloop/requests/file_environment_response.py b/src/humanloop/requests/file_environment_response.py new file mode 100644 index 00000000..40b60bc7 --- /dev/null +++ b/src/humanloop/requests/file_environment_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from ..types.environment_tag import EnvironmentTag +from .file_environment_response_file import FileEnvironmentResponseFileParams + + +class FileEnvironmentResponseParams(typing_extensions.TypedDict): + """ + Response model for the List Environments endpoint under Files. + + Contains the deployed version of the File, if one is deployed to the Environment. + """ + + id: str + created_at: dt.datetime + name: str + tag: EnvironmentTag + file: typing_extensions.NotRequired[FileEnvironmentResponseFileParams] + """ + The version of the File that is deployed to the Environment, if one is deployed. + """ diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py new file mode 100644 index 00000000..1a2021cb --- /dev/null +++ b/src/humanloop/requests/file_environment_response_file.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_response import AgentResponseParams +from .dataset_response import DatasetResponseParams +from .evaluator_response import EvaluatorResponseParams +from .flow_response import FlowResponseParams +from .prompt_response import PromptResponseParams +from .tool_response import ToolResponseParams + +FileEnvironmentResponseFileParams = typing.Union[ + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, +] diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py new file mode 100644 index 00000000..bb70bda4 --- /dev/null +++ b/src/humanloop/requests/file_environment_variable_request.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict): + name: str + """ + Name of the environment variable. + """ + + value: str + """ + Value of the environment variable. + """ diff --git a/src/humanloop/requests/file_id.py b/src/humanloop/requests/file_id.py new file mode 100644 index 00000000..d6c39755 --- /dev/null +++ b/src/humanloop/requests/file_id.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FileIdParams(typing_extensions.TypedDict): + """ + Specification of a File by its ID. + """ + + environment: typing_extensions.NotRequired[str] + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + id: str + """ + Unique identifier for the File. + """ diff --git a/src/humanloop/requests/file_path.py b/src/humanloop/requests/file_path.py new file mode 100644 index 00000000..238927d8 --- /dev/null +++ b/src/humanloop/requests/file_path.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FilePathParams(typing_extensions.TypedDict): + """ + Specification of a File by its path. 
+ """ + + environment: typing_extensions.NotRequired[str] + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + path: str + """ + Path identifying a File. Provide this to specify a File. + """ diff --git a/src/humanloop/requests/file_request.py b/src/humanloop/requests/file_request.py new file mode 100644 index 00000000..91e730d6 --- /dev/null +++ b/src/humanloop/requests/file_request.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FileRequestParams(typing_extensions.TypedDict): + id: typing_extensions.NotRequired[str] + """ + ID for an existing File. + """ + + path: typing_extensions.NotRequired[str] + """ + Path of the File, including the name. This locates the File in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + """ diff --git a/src/humanloop/requests/flow_kernel_request.py b/src/humanloop/requests/flow_kernel_request.py new file mode 100644 index 00000000..0a2b7993 --- /dev/null +++ b/src/humanloop/requests/flow_kernel_request.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class FlowKernelRequestParams(typing_extensions.TypedDict): + attributes: typing.Dict[str, typing.Optional[typing.Any]] + """ + A key-value object identifying the Flow Version. + """ diff --git a/src/humanloop/requests/flow_log_response.py b/src/humanloop/requests/flow_log_response.py new file mode 100644 index 00000000..661fc301 --- /dev/null +++ b/src/humanloop/requests/flow_log_response.py @@ -0,0 +1,161 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.log_status import LogStatus +from .chat_message import ChatMessageParams +from .flow_response import FlowResponseParams + +if typing.TYPE_CHECKING: + from .evaluator_log_response import EvaluatorLogResponseParams + from .log_response import LogResponseParams + + +class FlowLogResponseParams(typing_extensions.TypedDict): + """ + General request for creating a Log + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + List of chat messages that were used as an input to the Flow. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The output message returned by this Flow. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. 
+ """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the Flow Log. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] + """ + Logs nested under this Log in the Trace. + """ + + flow: FlowResponseParams + """ + Flow used to generate the Log. + """ diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py new file mode 100644 index 00000000..19087c61 --- /dev/null +++ b/src/humanloop/requests/flow_response.py @@ -0,0 +1,109 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.user_response import UserResponse +from .environment_response import EnvironmentResponseParams +from .evaluator_aggregate import EvaluatorAggregateParams + +if typing.TYPE_CHECKING: + from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams + + +class FlowResponseParams(typing_extensions.TypedDict): + """ + Response model for a Flow. + """ + + path: str + """ + Path of the Flow, including the name, which is used as a unique identifier. + """ + + id: str + """ + Unique identifier for the Flow. + """ + + directory_id: typing_extensions.NotRequired[str] + """ + ID of the directory that the file is in on Humanloop. + """ + + attributes: typing.Dict[str, typing.Optional[typing.Any]] + """ + A key-value object identifying the Flow Version. + """ + + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Flow version. Version names must be unique for a given Flow. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the Version. + """ + + name: str + """ + Name of the Flow. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Flow. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + version_id: str + """ + Unique identifier for the specific Flow Version. If no query params provided, the default deployed Flow Version is returned. + """ + + type: typing_extensions.NotRequired[typing.Literal["flow"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Flow Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Flow. + """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Flow Version + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Flow Version. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] + """ + The list of Monitoring Evaluators associated with the Flow Version. + """ diff --git a/src/humanloop/requests/function_tool.py b/src/humanloop/requests/function_tool.py new file mode 100644 index 00000000..473b2b6e --- /dev/null +++ b/src/humanloop/requests/function_tool.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FunctionToolParams(typing_extensions.TypedDict): + """ + A function tool to be called by the model where user owns runtime. + """ + + name: str + arguments: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/function_tool_choice.py b/src/humanloop/requests/function_tool_choice.py new file mode 100644 index 00000000..4b1c6c47 --- /dev/null +++ b/src/humanloop/requests/function_tool_choice.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. 
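+
+# A hedged sketch of FunctionToolParams (defined in function_tool.py above). Since
+# the user owns the runtime, `arguments` is shown as a JSON-encoded string, an
+# assumption based on common tool-call conventions; values are illustrative.
+#
+#   import json
+#
+#   call: FunctionToolParams = {
+#       "name": "get_weather",
+#       "arguments": json.dumps({"city": "Paris"}),
+#   }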
+ +import typing_extensions + + +class FunctionToolChoiceParams(typing_extensions.TypedDict): + """ + A function tool to be called by the model where user owns runtime. + """ + + name: str diff --git a/src/humanloop/requests/http_validation_error.py b/src/humanloop/requests/http_validation_error.py new file mode 100644 index 00000000..7b0ed08f --- /dev/null +++ b/src/humanloop/requests/http_validation_error.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .validation_error import ValidationErrorParams + + +class HttpValidationErrorParams(typing_extensions.TypedDict): + detail: typing_extensions.NotRequired[typing.Sequence[ValidationErrorParams]] diff --git a/src/humanloop/requests/human_evaluator_request.py b/src/humanloop/requests/human_evaluator_request.py new file mode 100644 index 00000000..9bd32e2d --- /dev/null +++ b/src/humanloop/requests/human_evaluator_request.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.evaluator_arguments_type import EvaluatorArgumentsType +from ..types.human_evaluator_request_return_type import HumanEvaluatorRequestReturnType +from ..types.valence import Valence +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams + + +class HumanEvaluatorRequestParams(typing_extensions.TypedDict): + arguments_type: EvaluatorArgumentsType + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: HumanEvaluatorRequestReturnType + """ + The type of the return value of the Evaluator. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] + """ + The options that can be applied as judgments. + """ + + number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing_extensions.NotRequired[Valence] + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. + """ + + evaluator_type: typing.Literal["human"] + instructions: typing_extensions.NotRequired[str] + """ + Instructions and guidelines for applying judgments. + """ diff --git a/src/humanloop/requests/image_chat_content.py b/src/humanloop/requests/image_chat_content.py new file mode 100644 index 00000000..5dc1163e --- /dev/null +++ b/src/humanloop/requests/image_chat_content.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .image_url import ImageUrlParams + + +class ImageChatContentParams(typing_extensions.TypedDict): + type: typing.Literal["image_url"] + image_url: ImageUrlParams + """ + The message's image content. 
+ """ diff --git a/src/humanloop/requests/image_url.py b/src/humanloop/requests/image_url.py new file mode 100644 index 00000000..9d2a671b --- /dev/null +++ b/src/humanloop/requests/image_url.py @@ -0,0 +1,16 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +from ..types.image_url_detail import ImageUrlDetail + + +class ImageUrlParams(typing_extensions.TypedDict): + url: str + """ + Either a URL of the image or the base64 encoded image data. + """ + + detail: typing_extensions.NotRequired[ImageUrlDetail] + """ + Specify the detail level of the image provided to the model. For more details see: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding + """ diff --git a/src/humanloop/requests/input_response.py b/src/humanloop/requests/input_response.py new file mode 100644 index 00000000..ffc4874c --- /dev/null +++ b/src/humanloop/requests/input_response.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class InputResponseParams(typing_extensions.TypedDict): + name: str + """ + Type of input. + """ diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py new file mode 100644 index 00000000..58c44162 --- /dev/null +++ b/src/humanloop/requests/linked_file_request.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class LinkedFileRequestParams(typing_extensions.TypedDict): + file_id: str + environment_id: typing_extensions.NotRequired[str] + version_id: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/linked_tool_response.py b/src/humanloop/requests/linked_tool_response.py new file mode 100644 index 00000000..646549d9 --- /dev/null +++ b/src/humanloop/requests/linked_tool_response.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class LinkedToolResponseParams(typing_extensions.TypedDict): + name: str + """ + Name for the tool referenced by the model. + """ + + description: str + """ + Description of the tool referenced by the model + """ + + strict: typing_extensions.NotRequired[bool] + """ + If true, forces the model to output json data in the structure of the parameters schema. + """ + + parameters: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ + """ + + id: str + """ + Unique identifier for the Tool linked. + """ + + version_id: str + """ + Unique identifier for the Tool Version linked. + """ diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py new file mode 100644 index 00000000..8e1d6b0e --- /dev/null +++ b/src/humanloop/requests/list_agents.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .agent_response import AgentResponseParams + + +class ListAgentsParams(typing_extensions.TypedDict): + records: typing.Sequence[AgentResponseParams] + """ + The list of Agents. + """ diff --git a/src/humanloop/requests/list_datasets.py b/src/humanloop/requests/list_datasets.py new file mode 100644 index 00000000..b49ea512 --- /dev/null +++ b/src/humanloop/requests/list_datasets.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from .dataset_response import DatasetResponseParams + + +class ListDatasetsParams(typing_extensions.TypedDict): + records: typing.Sequence[DatasetResponseParams] + """ + The list of Datasets. + """ diff --git a/src/humanloop/requests/list_evaluators.py b/src/humanloop/requests/list_evaluators.py new file mode 100644 index 00000000..61d1aa46 --- /dev/null +++ b/src/humanloop/requests/list_evaluators.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .evaluator_response import EvaluatorResponseParams + + +class ListEvaluatorsParams(typing_extensions.TypedDict): + records: typing.Sequence[EvaluatorResponseParams] + """ + The list of Evaluators. + """ diff --git a/src/humanloop/requests/list_flows.py b/src/humanloop/requests/list_flows.py new file mode 100644 index 00000000..32b90142 --- /dev/null +++ b/src/humanloop/requests/list_flows.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .flow_response import FlowResponseParams + + +class ListFlowsParams(typing_extensions.TypedDict): + records: typing.Sequence[FlowResponseParams] + """ + The list of Flows. + """ diff --git a/src/humanloop/requests/list_prompts.py b/src/humanloop/requests/list_prompts.py new file mode 100644 index 00000000..717fd9eb --- /dev/null +++ b/src/humanloop/requests/list_prompts.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .prompt_response import PromptResponseParams + + +class ListPromptsParams(typing_extensions.TypedDict): + records: typing.Sequence[PromptResponseParams] + """ + The list of Prompts. + """ diff --git a/src/humanloop/requests/list_tools.py b/src/humanloop/requests/list_tools.py new file mode 100644 index 00000000..d12fe188 --- /dev/null +++ b/src/humanloop/requests/list_tools.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .tool_response import ToolResponseParams + + +class ListToolsParams(typing_extensions.TypedDict): + records: typing.Sequence[ToolResponseParams] + """ + The list of Tools. + """ diff --git a/src/humanloop/requests/llm_evaluator_request.py b/src/humanloop/requests/llm_evaluator_request.py new file mode 100644 index 00000000..fd4c6d29 --- /dev/null +++ b/src/humanloop/requests/llm_evaluator_request.py @@ -0,0 +1,49 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.evaluator_arguments_type import EvaluatorArgumentsType +from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum +from ..types.valence import Valence +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams +from .prompt_kernel_request import PromptKernelRequestParams + + +class LlmEvaluatorRequestParams(typing_extensions.TypedDict): + arguments_type: EvaluatorArgumentsType + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: EvaluatorReturnTypeEnum + """ + The type of the return value of the Evaluator. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Evaluator. 
Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing_extensions.NotRequired[typing.Sequence[EvaluatorJudgmentOptionResponseParams]] + """ + The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. + """ + + number_limits: typing_extensions.NotRequired[EvaluatorJudgmentNumberLimitParams] + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing_extensions.NotRequired[Valence] + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. + """ + + evaluator_type: typing.Literal["llm"] + prompt: typing_extensions.NotRequired[PromptKernelRequestParams] + """ + The prompt parameters used to generate. + """ diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py new file mode 100644 index 00000000..3a1f56a0 --- /dev/null +++ b/src/humanloop/requests/log_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from .agent_log_response import AgentLogResponseParams + from .evaluator_log_response import EvaluatorLogResponseParams + from .flow_log_response import FlowLogResponseParams + from .prompt_log_response import PromptLogResponseParams + from .tool_log_response import ToolLogResponseParams +LogResponseParams = typing.Union[ + "PromptLogResponseParams", + "ToolLogResponseParams", + "EvaluatorLogResponseParams", + "FlowLogResponseParams", + "AgentLogResponseParams", +] diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py new file mode 100644 index 00000000..2a9b1952 --- /dev/null +++ b/src/humanloop/requests/log_stream_response.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_log_stream_response import AgentLogStreamResponseParams +from .prompt_call_stream_response import PromptCallStreamResponseParams + +LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams] diff --git a/src/humanloop/requests/monitoring_evaluator_environment_request.py b/src/humanloop/requests/monitoring_evaluator_environment_request.py new file mode 100644 index 00000000..b0505ada --- /dev/null +++ b/src/humanloop/requests/monitoring_evaluator_environment_request.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class MonitoringEvaluatorEnvironmentRequestParams(typing_extensions.TypedDict): + evaluator_id: str + """ + Unique identifier for the Evaluator to be used for monitoring. + """ + + environment_id: str + """ + Unique identifier for the Environment. The Evaluator Version deployed to this Environment will be used for monitoring. + """ diff --git a/src/humanloop/requests/monitoring_evaluator_response.py b/src/humanloop/requests/monitoring_evaluator_response.py new file mode 100644 index 00000000..c946fc65 --- /dev/null +++ b/src/humanloop/requests/monitoring_evaluator_response.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. 
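+# Usage sketch (illustrative; the IDs are hypothetical). A monitoring
+# Evaluator can be referenced either by a deployed Environment or by a
+# pinned Version (the latter via the sibling
+# monitoring_evaluator_version_request module):
+#
+#   by_environment: "MonitoringEvaluatorEnvironmentRequestParams" = {
+#       "evaluator_id": "ev_abc123",
+#       "environment_id": "env_def456",
+#   }
+#   by_version: "MonitoringEvaluatorVersionRequestParams" = {
+#       "evaluator_version_id": "evv_ghi789",
+#   }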
+ +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.monitoring_evaluator_state import MonitoringEvaluatorState + +if typing.TYPE_CHECKING: + from .evaluator_response import EvaluatorResponseParams + from .version_reference_response import VersionReferenceResponseParams + + +class MonitoringEvaluatorResponseParams(typing_extensions.TypedDict): + version_reference: "VersionReferenceResponseParams" + """ + The Evaluator Version used for monitoring. This can be a specific Version by ID, or a Version deployed to an Environment. + """ + + version: typing_extensions.NotRequired["EvaluatorResponseParams"] + """ + The deployed Version. + """ + + state: MonitoringEvaluatorState + """ + The state of the Monitoring Evaluator. Either `active` or `inactive` + """ + + created_at: dt.datetime + updated_at: dt.datetime diff --git a/src/humanloop/requests/monitoring_evaluator_version_request.py b/src/humanloop/requests/monitoring_evaluator_version_request.py new file mode 100644 index 00000000..aa37c3ea --- /dev/null +++ b/src/humanloop/requests/monitoring_evaluator_version_request.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class MonitoringEvaluatorVersionRequestParams(typing_extensions.TypedDict): + evaluator_version_id: str + """ + Unique identifier for the Evaluator Version to be used for monitoring. + """ diff --git a/src/humanloop/requests/numeric_evaluator_stats_response.py b/src/humanloop/requests/numeric_evaluator_stats_response.py new file mode 100644 index 00000000..a74784ce --- /dev/null +++ b/src/humanloop/requests/numeric_evaluator_stats_response.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class NumericEvaluatorStatsResponseParams(typing_extensions.TypedDict): + """ + Base attributes for stats for an Evaluator Version-Evaluated Version pair + in the Evaluation. + """ + + evaluator_version_id: str + """ + Unique identifier for the Evaluator Version. + """ + + total_logs: int + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int + """ + The total number of null judgments (i.e. abstentions) for this Evaluator Version. + """ + + num_errors: int + """ + The total number of errored Evaluators for this Evaluator Version. + """ + + mean: typing_extensions.NotRequired[float] + sum: typing_extensions.NotRequired[float] + std: typing_extensions.NotRequired[float] + percentiles: typing.Dict[str, float] diff --git a/src/humanloop/requests/overall_stats.py b/src/humanloop/requests/overall_stats.py new file mode 100644 index 00000000..fd42a922 --- /dev/null +++ b/src/humanloop/requests/overall_stats.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class OverallStatsParams(typing_extensions.TypedDict): + num_datapoints: int + """ + The total number of Datapoints in the Evaluation's Dataset Version. + """ + + total_logs: int + """ + The total number of Logs in the Evaluation. + """ + + total_evaluator_logs: int + """ + The total number of Evaluator Logs in the Evaluation. 
+ """ + + total_human_evaluator_logs: int + """ + The total number of human Evaluator Logs in the Evaluation Report. + """ + + total_completed_human_evaluator_logs: int + """ + The total number of non-None human Evaluator Logs in the Evaluation Report. + """ diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py new file mode 100644 index 00000000..af318b6a --- /dev/null +++ b/src/humanloop/requests/paginated_data_agent_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .agent_response import AgentResponseParams + + +class PaginatedDataAgentResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[AgentResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_evaluation_log_response.py b/src/humanloop/requests/paginated_data_evaluation_log_response.py new file mode 100644 index 00000000..61439b55 --- /dev/null +++ b/src/humanloop/requests/paginated_data_evaluation_log_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .evaluation_log_response import EvaluationLogResponseParams + + +class PaginatedDataEvaluationLogResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[EvaluationLogResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_evaluator_response.py b/src/humanloop/requests/paginated_data_evaluator_response.py new file mode 100644 index 00000000..15294571 --- /dev/null +++ b/src/humanloop/requests/paginated_data_evaluator_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .evaluator_response import EvaluatorResponseParams + + +class PaginatedDataEvaluatorResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[EvaluatorResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_flow_response.py b/src/humanloop/requests/paginated_data_flow_response.py new file mode 100644 index 00000000..51db6406 --- /dev/null +++ b/src/humanloop/requests/paginated_data_flow_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .flow_response import FlowResponseParams + + +class PaginatedDataFlowResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[FlowResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_log_response.py b/src/humanloop/requests/paginated_data_log_response.py new file mode 100644 index 00000000..450f2d0e --- /dev/null +++ b/src/humanloop/requests/paginated_data_log_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from .log_response import LogResponseParams + + +class PaginatedDataLogResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[LogResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_prompt_response.py b/src/humanloop/requests/paginated_data_prompt_response.py new file mode 100644 index 00000000..62eae52b --- /dev/null +++ b/src/humanloop/requests/paginated_data_prompt_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .prompt_response import PromptResponseParams + + +class PaginatedDataPromptResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[PromptResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_tool_response.py b/src/humanloop/requests/paginated_data_tool_response.py new file mode 100644 index 00000000..41eaf15a --- /dev/null +++ b/src/humanloop/requests/paginated_data_tool_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .tool_response import ToolResponseParams + + +class PaginatedDataToolResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[ToolResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py new file mode 100644 index 00000000..5bde00b9 --- /dev/null +++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, +) + + +class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams( + typing_extensions.TypedDict +): + records: typing.Sequence[ + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams + ] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py new file mode 100644 index 00000000..51db2493 --- /dev/null +++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
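+# Usage sketch (illustrative): members of this union carry an optional
+# `type` literal (e.g. "prompt", "flow"), so records can be dispatched on
+# it when present:
+#
+#   for record in page["records"]:
+#       if record.get("type") == "flow":
+#           ...  # narrow to FlowResponseParams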
+ +import typing + +from .agent_response import AgentResponseParams +from .dataset_response import DatasetResponseParams +from .evaluator_response import EvaluatorResponseParams +from .flow_response import FlowResponseParams +from .prompt_response import PromptResponseParams +from .tool_response import ToolResponseParams + +PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[ + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, +] diff --git a/src/humanloop/requests/paginated_datapoint_response.py b/src/humanloop/requests/paginated_datapoint_response.py new file mode 100644 index 00000000..5ef2bae4 --- /dev/null +++ b/src/humanloop/requests/paginated_datapoint_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .datapoint_response import DatapointResponseParams + + +class PaginatedDatapointResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[DatapointResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_dataset_response.py b/src/humanloop/requests/paginated_dataset_response.py new file mode 100644 index 00000000..ea5cd5b1 --- /dev/null +++ b/src/humanloop/requests/paginated_dataset_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .dataset_response import DatasetResponseParams + + +class PaginatedDatasetResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[DatasetResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_evaluation_response.py b/src/humanloop/requests/paginated_evaluation_response.py new file mode 100644 index 00000000..30916a81 --- /dev/null +++ b/src/humanloop/requests/paginated_evaluation_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .evaluation_response import EvaluationResponseParams + + +class PaginatedEvaluationResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[EvaluationResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py new file mode 100644 index 00000000..a6ed2b2f --- /dev/null +++ b/src/humanloop/requests/populate_template_response.py @@ -0,0 +1,229 @@ +# This file was auto-generated by Fern from our API Definition. 
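+# Usage sketch (illustrative; names and values are hypothetical): templates
+# reference inputs with double curly brackets, and `populated_template`
+# holds the filled-in result. A chat template message of
+#   {"role": "user", "content": "Summarise {{article}}"}
+# populated with inputs {"article": "..."} comes back with the placeholder
+# replaced by the provided value.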
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from ..types.model_providers import ModelProviders
+from ..types.template_language import TemplateLanguage
+from ..types.user_response import UserResponse
+from .environment_response import EnvironmentResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+from .input_response import InputResponseParams
+from .linked_tool_response import LinkedToolResponseParams
+from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
+from .populate_template_response_stop import PopulateTemplateResponseStopParams
+from .populate_template_response_template import PopulateTemplateResponseTemplateParams
+from .response_format import ResponseFormatParams
+from .tool_function import ToolFunctionParams
+
+
+class PopulateTemplateResponseParams(typing_extensions.TypedDict):
+    """
+    Base type that all File Responses should inherit from.
+
+    Attributes defined here are common to all File Responses and should be overridden
+    in the inheriting classes with documentation and appropriate Field definitions.
+    """
+
+    path: str
+    """
+    Path of the Prompt, including the name, which is used as a unique identifier.
+    """
+
+    id: str
+    """
+    Unique identifier for the Prompt.
+    """
+
+    directory_id: typing_extensions.NotRequired[str]
+    """
+    ID of the directory that the file is in on Humanloop.
+    """
+
+    model: str
+    """
+    The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+    """
+
+    endpoint: typing_extensions.NotRequired[ModelEndpoints]
+    """
+    The provider model endpoint used.
+    """
+
+    template: typing_extensions.NotRequired[PopulateTemplateResponseTemplateParams]
+    """
+    The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+    For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+    For completion models, provide a prompt template as a string.
+
+    Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+    """
+
+    template_language: typing_extensions.NotRequired[TemplateLanguage]
+    """
+    The template language to use for rendering the template.
+    """
+
+    provider: typing_extensions.NotRequired[ModelProviders]
+    """
+    The company providing the underlying model service.
+    """
+
+    max_tokens: typing_extensions.NotRequired[int]
+    """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+    """
+
+    temperature: typing_extensions.NotRequired[float]
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing_extensions.NotRequired[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing_extensions.NotRequired[PopulateTemplateResponseStopParams]
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """ + + presence_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams] + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]] + """ + The tool specification that the model can choose to call if Tool calling is supported. + """ + + linked_tools: typing_extensions.NotRequired[typing.Sequence[LinkedToolResponseParams]] + """ + The tools linked to your prompt that the model can call. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Prompt version. Version names must be unique for a given Prompt. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Prompt. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + name: str + """ + Name of the Prompt. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + + version_id: str + """ + Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. + """ + + type: typing_extensions.NotRequired[typing.Literal["prompt"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Prompt Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Prompt. + """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Prompt Version + """ + + total_logs_count: int + """ + The number of logs that have been generated across all Prompt Versions + """ + + inputs: typing.Sequence[InputResponseParams] + """ + Inputs associated to the Prompt. 
Inputs correspond to any of the variables used within the Prompt template. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence[MonitoringEvaluatorResponseParams]] + """ + Evaluators that have been attached to this Prompt that are used for monitoring logs. + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Prompt Version. + """ + + raw_file_content: typing_extensions.NotRequired[str] + """ + The raw content of the Prompt. Corresponds to the .prompt file. + """ + + populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams] + """ + The template populated with the input values you provided in the request. Returns None if no template exists. + """ diff --git a/src/humanloop/requests/populate_template_response_populated_template.py b/src/humanloop/requests/populate_template_response_populated_template.py new file mode 100644 index 00000000..79bc7505 --- /dev/null +++ b/src/humanloop/requests/populate_template_response_populated_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessageParams + +PopulateTemplateResponsePopulatedTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py new file mode 100644 index 00000000..9140180f --- /dev/null +++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/populate_template_response_stop.py b/src/humanloop/requests/populate_template_response_stop.py new file mode 100644 index 00000000..d4f19110 --- /dev/null +++ b/src/humanloop/requests/populate_template_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PopulateTemplateResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/populate_template_response_template.py b/src/humanloop/requests/populate_template_response_template.py new file mode 100644 index 00000000..7a9ba8e9 --- /dev/null +++ b/src/humanloop/requests/populate_template_response_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessageParams + +PopulateTemplateResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/prompt_call_log_response.py b/src/humanloop/requests/prompt_call_log_response.py new file mode 100644 index 00000000..4dff347b --- /dev/null +++ b/src/humanloop/requests/prompt_call_log_response.py @@ -0,0 +1,77 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from .chat_message import ChatMessageParams + + +class PromptCallLogResponseParams(typing_extensions.TypedDict): + """ + Sample specific response details for a Prompt call + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. 
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    output_message: typing_extensions.NotRequired[ChatMessageParams]
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the prompt.
+    """
+
+    output_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the output.
+    """
+
+    finish_reason: typing_extensions.NotRequired[str]
+    """
+    Reason the generation finished.
+    """
+
+    index: int
+    """
+    The index of the sample in the batch.
+    """
diff --git a/src/humanloop/requests/prompt_call_response.py b/src/humanloop/requests/prompt_call_response.py
new file mode 100644
index 00000000..14ff4609
--- /dev/null
+++ b/src/humanloop/requests/prompt_call_response.py
@@ -0,0 +1,111 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
+from .prompt_call_log_response import PromptCallLogResponseParams
+from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
+from .prompt_response import PromptResponseParams
+
+
+class PromptCallResponseParams(typing_extensions.TypedDict):
+    """
+    Response model for a Prompt call with potentially multiple log samples.
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing_extensions.NotRequired[PromptCallResponseToolChoiceParams]
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    prompt: PromptResponseParams
+    """
+    Prompt used to generate the Log.
+    """
+
+    inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The inputs passed to the prompt template.
+ """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + ID of the log. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + ID of the Trace containing the Prompt Call Log. + """ + + logs: typing.Sequence[PromptCallLogResponseParams] + """ + The logs generated by the Prompt call. + """ diff --git a/src/humanloop/requests/prompt_call_response_tool_choice.py b/src/humanloop/requests/prompt_call_response_tool_choice.py new file mode 100644 index 00000000..63fd7183 --- /dev/null +++ b/src/humanloop/requests/prompt_call_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoiceParams + +PromptCallResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/prompt_call_stream_response.py b/src/humanloop/requests/prompt_call_stream_response.py new file mode 100644 index 00000000..9d3e5651 --- /dev/null +++ b/src/humanloop/requests/prompt_call_stream_response.py @@ -0,0 +1,92 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt + +import typing_extensions +from .chat_message import ChatMessageParams + + +class PromptCallStreamResponseParams(typing_extensions.TypedDict): + """ + Response model for calling Prompt in streaming mode. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. 
+ """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + index: int + """ + The index of the sample in the batch. + """ + + id: str + """ + ID of the log. + """ + + prompt_id: str + """ + ID of the Prompt the log belongs to. + """ + + version_id: str + """ + ID of the specific version of the Prompt. + """ diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py new file mode 100644 index 00000000..48d8db46 --- /dev/null +++ b/src/humanloop/requests/prompt_kernel_request.py @@ -0,0 +1,116 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.model_endpoints import ModelEndpoints +from ..types.model_providers import ModelProviders +from ..types.template_language import TemplateLanguage +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams +from .prompt_kernel_request_stop import PromptKernelRequestStopParams +from .prompt_kernel_request_template import PromptKernelRequestTemplateParams +from .response_format import ResponseFormatParams +from .tool_function import ToolFunctionParams + + +class PromptKernelRequestParams(typing_extensions.TypedDict): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + + model: str + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing_extensions.NotRequired[ModelEndpoints] + """ + The provider model endpoint used. + """ + + template: typing_extensions.NotRequired[PromptKernelRequestTemplateParams] + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing_extensions.NotRequired[TemplateLanguage] + """ + The template language to use for rendering the template. + """ + + provider: typing_extensions.NotRequired[ModelProviders] + """ + The company providing the underlying model service. + """ + + max_tokens: typing_extensions.NotRequired[int] + """ + The maximum number of tokens to generate. 
Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+    """
+
+    temperature: typing_extensions.NotRequired[float]
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing_extensions.NotRequired[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing_extensions.NotRequired[PromptKernelRequestStopParams]
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing_extensions.NotRequired[float]
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+    """
+
+    frequency_penalty: typing_extensions.NotRequired[float]
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+    """
+
+    other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Other parameter values to be passed to the provider call.
+    """
+
+    seed: typing_extensions.NotRequired[int]
+    """
+    If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+    """
+
+    response_format: typing_extensions.NotRequired[ResponseFormatParams]
+    """
+    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+    """
+
+    reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams]
+    """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+    """
+
+    tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
+    """
+    The tool specification that the model can choose to call if Tool calling is supported.
+    """
+
+    linked_tools: typing_extensions.NotRequired[typing.Sequence[str]]
+    """
+    The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called.
+    """
+
+    attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+    """
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..81df2957
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_kernel_request_stop.py b/src/humanloop/requests/prompt_kernel_request_stop.py
new file mode 100644
index 00000000..c3db9e58
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
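+# Usage sketch (illustrative): `stop` accepts either a single string or a
+# sequence of strings, e.g. "###" or ["\n\n", "###"]; generation halts at
+# the first match, and the stop sequence itself is not returned.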
+
+import typing
+
+PromptKernelRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/prompt_kernel_request_template.py b/src/humanloop/requests/prompt_kernel_request_template.py
new file mode 100644
index 00000000..aa389a04
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_template.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .chat_message import ChatMessageParams
+
+PromptKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/prompt_log_response.py b/src/humanloop/requests/prompt_log_response.py
new file mode 100644
index 00000000..6147adec
--- /dev/null
+++ b/src/humanloop/requests/prompt_log_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
+from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
+from .prompt_response import PromptResponseParams
+
+if typing.TYPE_CHECKING:
+    from .evaluator_log_response import EvaluatorLogResponseParams
+    from .log_response import LogResponseParams
+
+
+class PromptLogResponseParams(typing_extensions.TypedDict):
+    """
+    General request for creating a Log
+    """
+
+    output_message: typing_extensions.NotRequired[ChatMessageParams]
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the prompt.
+    """
+
+    output_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the output.
+    """
+
+    finish_reason: typing_extensions.NotRequired[str]
+    """
+    Reason the generation finished.
+    """
+
+    messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing_extensions.NotRequired[PromptLogResponseToolChoiceParams]
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    prompt: PromptResponseParams
+    """
+    Prompt used to generate the Log.
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    output: typing_extensions.NotRequired[str]
+    """
+    Generated output from your model for the provided inputs.
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing_extensions.NotRequired[str]
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing_extensions.NotRequired[LogStatus]
+    """
+    Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+    """
+
+    source_datapoint_id: typing_extensions.NotRequired[str]
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing_extensions.NotRequired[str]
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing_extensions.NotRequired[typing.Sequence[str]]
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+    """
+
+    user: typing_extensions.NotRequired[str]
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing_extensions.NotRequired[str]
+    """
+    The name of the Environment the Log is associated to.
+    """
+
+    save: typing_extensions.NotRequired[bool]
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing_extensions.NotRequired[str]
+    """
+    This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+    """
+
+    id: str
+    """
+    Unique identifier for the Log.
+    """
+
+    evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+    """
+    List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+    """
+
+    trace_flow_id: typing_extensions.NotRequired[str]
+    """
+    Identifier for the Flow that the Trace belongs to.
+    """
+
+    trace_id: typing_extensions.NotRequired[str]
+    """
+    Identifier for the Trace that the Log belongs to.
+    """
+
+    trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+    """
+    Logs nested under this Log in the Trace.
+ """ diff --git a/src/humanloop/requests/prompt_log_response_tool_choice.py b/src/humanloop/requests/prompt_log_response_tool_choice.py new file mode 100644 index 00000000..8e8ad6dd --- /dev/null +++ b/src/humanloop/requests/prompt_log_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoiceParams + +PromptLogResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py new file mode 100644 index 00000000..7a1b4493 --- /dev/null +++ b/src/humanloop/requests/prompt_response.py @@ -0,0 +1,227 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.model_endpoints import ModelEndpoints +from ..types.model_providers import ModelProviders +from ..types.template_language import TemplateLanguage +from ..types.user_response import UserResponse +from .environment_response import EnvironmentResponseParams +from .evaluator_aggregate import EvaluatorAggregateParams +from .input_response import InputResponseParams +from .linked_tool_response import LinkedToolResponseParams +from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams +from .prompt_response_stop import PromptResponseStopParams +from .prompt_response_template import PromptResponseTemplateParams +from .response_format import ResponseFormatParams +from .tool_function import ToolFunctionParams + +if typing.TYPE_CHECKING: + from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams + + +class PromptResponseParams(typing_extensions.TypedDict): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str + """ + Path of the Prompt, including the name, which is used as a unique identifier. + """ + + id: str + """ + Unique identifier for the Prompt. + """ + + directory_id: typing_extensions.NotRequired[str] + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing_extensions.NotRequired[ModelEndpoints] + """ + The provider model endpoint used. + """ + + template: typing_extensions.NotRequired[PromptResponseTemplateParams] + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing_extensions.NotRequired[TemplateLanguage] + """ + The template language to use for rendering the template. + """ + + provider: typing_extensions.NotRequired[ModelProviders] + """ + The company providing the underlying model service. + """ + + max_tokens: typing_extensions.NotRequired[int] + """ + The maximum number of tokens to generate. 
Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing_extensions.NotRequired[float]
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing_extensions.NotRequired[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing_extensions.NotRequired[PromptResponseStopParams]
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing_extensions.NotRequired[float]
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+    """
+
+    frequency_penalty: typing_extensions.NotRequired[float]
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+    """
+
+    other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Other parameter values to be passed to the provider call.
+    """
+
+    seed: typing_extensions.NotRequired[int]
+    """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+    """
+
+    response_format: typing_extensions.NotRequired[ResponseFormatParams]
+    """
+    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+    """
+
+    reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams]
+    """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+    """
+
+    tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
+    """
+    The tool specification that the model can choose to call if Tool calling is supported.
+    """
+
+    linked_tools: typing_extensions.NotRequired[typing.Sequence[LinkedToolResponseParams]]
+    """
+    The tools linked to your prompt that the model can call.
+    """
+
+    attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+    """
+
+    version_name: typing_extensions.NotRequired[str]
+    """
+    Unique name for the Prompt version. Version names must be unique for a given Prompt.
+    """
+
+    version_description: typing_extensions.NotRequired[str]
+    """
+    Description of the version, e.g., the changes made in this version.
+    """
+
+    description: typing_extensions.NotRequired[str]
+    """
+    Description of the Prompt.
+    """
+
+    tags: typing_extensions.NotRequired[typing.Sequence[str]]
+    """
+    List of tags associated with the file.
+    """
+
+    readme: typing_extensions.NotRequired[str]
+    """
+    Long description of the file.
+    """
+
+    name: str
+    """
+    Name of the Prompt.
+    """
+
+    schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The JSON schema for the Prompt.
+    """
+
+    version_id: str
+    """
+    Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
+ """ + + type: typing_extensions.NotRequired[typing.Literal["prompt"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Prompt Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Prompt. + """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Prompt Version + """ + + total_logs_count: int + """ + The number of logs that have been generated across all Prompt Versions + """ + + inputs: typing.Sequence[InputResponseParams] + """ + Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] + """ + Evaluators that have been attached to this Prompt that are used for monitoring logs. + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Prompt Version. + """ + + raw_file_content: typing_extensions.NotRequired[str] + """ + The raw content of the Prompt. Corresponds to the .prompt file. + """ diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py new file mode 100644 index 00000000..55d82486 --- /dev/null +++ b/src/humanloop/requests/prompt_response_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/prompt_response_stop.py b/src/humanloop/requests/prompt_response_stop.py new file mode 100644 index 00000000..c1545617 --- /dev/null +++ b/src/humanloop/requests/prompt_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PromptResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/prompt_response_template.py b/src/humanloop/requests/prompt_response_template.py new file mode 100644 index 00000000..b9f6deb4 --- /dev/null +++ b/src/humanloop/requests/prompt_response_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessageParams + +PromptResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/provider_api_keys.py b/src/humanloop/requests/provider_api_keys.py new file mode 100644 index 00000000..c37649ea --- /dev/null +++ b/src/humanloop/requests/provider_api_keys.py @@ -0,0 +1,16 @@ +# This file was auto-generated by Fern from our API Definition. 
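+#
+# A minimal usage sketch (illustrative, not generated code): every field below
+# is NotRequired, so callers can build the params as a plain dict containing
+# only the providers they use. The key values here are hypothetical
+# placeholders:
+#
+#   keys: "ProviderApiKeysParams" = {
+#       "openai": "sk-...",
+#       "anthropic": "sk-ant-...",
+#   }
+#
+# Note that `ai_21` is annotated with FieldMetadata(alias="ai21"), so it
+# serializes to the wire field name "ai21".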
+ +import typing_extensions +from ..core.serialization import FieldMetadata + + +class ProviderApiKeysParams(typing_extensions.TypedDict): + openai: typing_extensions.NotRequired[str] + ai_21: typing_extensions.NotRequired[typing_extensions.Annotated[str, FieldMetadata(alias="ai21")]] + mock: typing_extensions.NotRequired[str] + anthropic: typing_extensions.NotRequired[str] + deepseek: typing_extensions.NotRequired[str] + bedrock: typing_extensions.NotRequired[str] + cohere: typing_extensions.NotRequired[str] + openai_azure: typing_extensions.NotRequired[str] + openai_azure_endpoint: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/response_format.py b/src/humanloop/requests/response_format.py new file mode 100644 index 00000000..1fce8531 --- /dev/null +++ b/src/humanloop/requests/response_format.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.response_format_type import ResponseFormatType + + +class ResponseFormatParams(typing_extensions.TypedDict): + """ + Response format of the model. + """ + + type: ResponseFormatType + json_schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema of the response format if type is json_schema. + """ diff --git a/src/humanloop/requests/run_stats_response.py b/src/humanloop/requests/run_stats_response.py new file mode 100644 index 00000000..6bdbf08e --- /dev/null +++ b/src/humanloop/requests/run_stats_response.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.evaluation_status import EvaluationStatus +from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams + + +class RunStatsResponseParams(typing_extensions.TypedDict): + """ + Stats for a Run in the Evaluation. + """ + + run_id: str + """ + Unique identifier for the Run. + """ + + version_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the evaluated Version. + """ + + batch_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + num_logs: int + """ + The total number of existing Logs in this Run. + """ + + evaluator_stats: typing.Sequence[RunStatsResponseEvaluatorStatsItemParams] + """ + Stats for each Evaluator Version applied to this Run. + """ + + status: EvaluationStatus + """ + The current status of the Run. + """ diff --git a/src/humanloop/requests/run_stats_response_evaluator_stats_item.py b/src/humanloop/requests/run_stats_response_evaluator_stats_item.py new file mode 100644 index 00000000..09231c9b --- /dev/null +++ b/src/humanloop/requests/run_stats_response_evaluator_stats_item.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. 
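+#
+# All four stats variants in the union below share the counting fields seen on
+# the Select and Text responses (total_logs, num_judgments, num_nulls,
+# num_errors), so aggregate counts can be computed without narrowing the
+# union. A minimal sketch, assuming those shared keys hold for every variant:
+#
+#   def total_errors(items: typing.Sequence["RunStatsResponseEvaluatorStatsItemParams"]) -> int:
+#       return sum(item["num_errors"] for item in items)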
+ +import typing + +from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams +from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams +from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams +from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams + +RunStatsResponseEvaluatorStatsItemParams = typing.Union[ + NumericEvaluatorStatsResponseParams, + BooleanEvaluatorStatsResponseParams, + SelectEvaluatorStatsResponseParams, + TextEvaluatorStatsResponseParams, +] diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py new file mode 100644 index 00000000..3091de87 --- /dev/null +++ b/src/humanloop/requests/run_version_response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_response import AgentResponseParams +from .evaluator_response import EvaluatorResponseParams +from .flow_response import FlowResponseParams +from .prompt_response import PromptResponseParams +from .tool_response import ToolResponseParams + +RunVersionResponseParams = typing.Union[ + PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams +] diff --git a/src/humanloop/requests/select_evaluator_stats_response.py b/src/humanloop/requests/select_evaluator_stats_response.py new file mode 100644 index 00000000..7c77198a --- /dev/null +++ b/src/humanloop/requests/select_evaluator_stats_response.py @@ -0,0 +1,41 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class SelectEvaluatorStatsResponseParams(typing_extensions.TypedDict): + """ + Also used for 'multi_select' Evaluator versions + """ + + evaluator_version_id: str + """ + Unique identifier for the Evaluator Version. + """ + + total_logs: int + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int + """ + The total number of null judgments (i.e. abstentions) for this Evaluator Version. + """ + + num_errors: int + """ + The total number of errored Evaluators for this Evaluator Version. + """ + + num_judgments_per_option: typing.Dict[str, int] + """ + The total number of Evaluator judgments for this Evaluator Version. This is a mapping of the option name to the number of judgments for that option. + """ diff --git a/src/humanloop/requests/text_chat_content.py b/src/humanloop/requests/text_chat_content.py new file mode 100644 index 00000000..fa9f5437 --- /dev/null +++ b/src/humanloop/requests/text_chat_content.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class TextChatContentParams(typing_extensions.TypedDict): + type: typing.Literal["text"] + text: str + """ + The message's text content. + """ diff --git a/src/humanloop/requests/text_evaluator_stats_response.py b/src/humanloop/requests/text_evaluator_stats_response.py new file mode 100644 index 00000000..8f0f358d --- /dev/null +++ b/src/humanloop/requests/text_evaluator_stats_response.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. 
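+#
+# Derived metrics are easy to compute from the counts below. A hypothetical
+# helper (not part of the generated client):
+#
+#   def judgment_rate(stats: "TextEvaluatorStatsResponseParams") -> float:
+#       # Fraction of this Evaluator Version's Logs that produced a judgment
+#       # (num_judgments excludes Nulls and Errors; guard against zero Logs).
+#       return stats["num_judgments"] / stats["total_logs"] if stats["total_logs"] else 0.0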
+
+import typing_extensions
+
+
+class TextEvaluatorStatsResponseParams(typing_extensions.TypedDict):
+    """
+    Base attributes for stats for an Evaluator Version-Evaluated Version pair
+    in the Evaluation.
+    """
+
+    evaluator_version_id: str
+    """
+    Unique identifier for the Evaluator Version.
+    """
+
+    total_logs: int
+    """
+    The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors.
+    """
+
+    num_judgments: int
+    """
+    The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors.
+    """
+
+    num_nulls: int
+    """
+    The total number of null judgments (i.e. abstentions) for this Evaluator Version.
+    """
+
+    num_errors: int
+    """
+    The total number of errored Evaluators for this Evaluator Version.
+    """
diff --git a/src/humanloop/requests/tool_call.py b/src/humanloop/requests/tool_call.py
new file mode 100644
index 00000000..d491b49b
--- /dev/null
+++ b/src/humanloop/requests/tool_call.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.chat_tool_type import ChatToolType
+from .function_tool import FunctionToolParams
+
+
+class ToolCallParams(typing_extensions.TypedDict):
+    """
+    A tool call to be made.
+    """
+
+    id: str
+    type: ChatToolType
+    function: FunctionToolParams
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
new file mode 100644
index 00000000..e00069de
--- /dev/null
+++ b/src/humanloop/requests/tool_call_response.py
@@ -0,0 +1,146 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+from .tool_response import ToolResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+    """
+    Response model for a Tool call.
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    tool: ToolResponseParams
+    """
+    Tool used to generate the Log.
+    """
+
+    output: typing_extensions.NotRequired[str]
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing_extensions.NotRequired[str]
+    """
+    Identifies where the model was called from.
+ """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + ID of the log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + ID of the Trace containing the Tool Call Log. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ diff --git a/src/humanloop/requests/tool_choice.py b/src/humanloop/requests/tool_choice.py new file mode 100644 index 00000000..22ab3251 --- /dev/null +++ b/src/humanloop/requests/tool_choice.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +from ..types.chat_tool_type import ChatToolType +from .function_tool_choice import FunctionToolChoiceParams + + +class ToolChoiceParams(typing_extensions.TypedDict): + """ + Tool choice to force the model to use a tool. + """ + + type: ChatToolType + function: FunctionToolChoiceParams diff --git a/src/humanloop/requests/tool_function.py b/src/humanloop/requests/tool_function.py new file mode 100644 index 00000000..9132b10e --- /dev/null +++ b/src/humanloop/requests/tool_function.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class ToolFunctionParams(typing_extensions.TypedDict): + name: str + """ + Name for the tool referenced by the model. + """ + + description: str + """ + Description of the tool referenced by the model + """ + + strict: typing_extensions.NotRequired[bool] + """ + If true, forces the model to output json data in the structure of the parameters schema. 
+    """
+
+    parameters: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/
+    """
diff --git a/src/humanloop/requests/tool_kernel_request.py b/src/humanloop/requests/tool_kernel_request.py
new file mode 100644
index 00000000..48f8f5b1
--- /dev/null
+++ b/src/humanloop/requests/tool_kernel_request.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .tool_function import ToolFunctionParams
+
+
+class ToolKernelRequestParams(typing_extensions.TypedDict):
+    function: typing_extensions.NotRequired[ToolFunctionParams]
+    """
+    Callable function specification of the Tool shown to the model for tool calling.
+    """
+
+    source_code: typing_extensions.NotRequired[str]
+    """
+    Code source of the Tool.
+    """
+
+    setup_values: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+    """
+
+    attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+    """
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
new file mode 100644
index 00000000..f4be5ad0
--- /dev/null
+++ b/src/humanloop/requests/tool_log_response.py
@@ -0,0 +1,156 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
+from .tool_response import ToolResponseParams
+
+if typing.TYPE_CHECKING:
+    from .evaluator_log_response import EvaluatorLogResponseParams
+    from .log_response import LogResponseParams
+
+
+class ToolLogResponseParams(typing_extensions.TypedDict):
+    """
+    General request for creating a Log
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    output: typing_extensions.NotRequired[str]
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing_extensions.NotRequired[str]
+    """
+    Identifies where the model was called from.
+ """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] + """ + Logs nested under this Log in the Trace. + """ + + tool: ToolResponseParams + """ + Tool used to generate the Log. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the Tool. + """ diff --git a/src/humanloop/requests/tool_response.py b/src/humanloop/requests/tool_response.py new file mode 100644 index 00000000..ea4ab1df --- /dev/null +++ b/src/humanloop/requests/tool_response.py @@ -0,0 +1,145 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import typing_extensions +from ..types.files_tool_type import FilesToolType +from ..types.user_response import UserResponse +from .environment_response import EnvironmentResponseParams +from .evaluator_aggregate import EvaluatorAggregateParams +from .input_response import InputResponseParams +from .tool_function import ToolFunctionParams + +if typing.TYPE_CHECKING: + from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams + + +class ToolResponseParams(typing_extensions.TypedDict): + """ + Base type that all File Responses should inherit from. 
+
+    Attributes defined here are common to all File Responses and should be overridden
+    in the inheriting classes with documentation and appropriate Field definitions.
+    """
+
+    path: str
+    """
+    Path of the Tool, including the name, which is used as a unique identifier.
+    """
+
+    id: str
+    """
+    Unique identifier for the Tool.
+    """
+
+    directory_id: typing_extensions.NotRequired[str]
+    """
+    ID of the directory that the file is in on Humanloop.
+    """
+
+    function: typing_extensions.NotRequired[ToolFunctionParams]
+    """
+    Callable function specification of the Tool shown to the model for tool calling.
+    """
+
+    source_code: typing_extensions.NotRequired[str]
+    """
+    Code source of the Tool.
+    """
+
+    setup_values: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+    """
+
+    attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+    """
+
+    tool_type: typing_extensions.NotRequired[FilesToolType]
+    """
+    Type of Tool.
+    """
+
+    version_name: typing_extensions.NotRequired[str]
+    """
+    Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+    """
+
+    version_description: typing_extensions.NotRequired[str]
+    """
+    Description of the Version.
+    """
+
+    name: str
+    """
+    Name of the Tool, which is used as a unique identifier.
+    """
+
+    description: typing_extensions.NotRequired[str]
+    """
+    Description of the Tool.
+    """
+
+    readme: typing_extensions.NotRequired[str]
+    """
+    Long description of the file.
+    """
+
+    tags: typing_extensions.NotRequired[typing.Sequence[str]]
+    """
+    List of tags associated with the file.
+    """
+
+    version_id: str
+    """
+    Unique identifier for the specific Tool Version. If no query params provided, the default deployed Tool Version is returned.
+    """
+
+    type: typing_extensions.NotRequired[typing.Literal["tool"]]
+    environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]]
+    """
+    The list of environments the Tool Version is deployed to.
+    """
+
+    created_at: dt.datetime
+    updated_at: dt.datetime
+    created_by: typing_extensions.NotRequired[UserResponse]
+    """
+    The user who created the Tool.
+    """
+
+    last_used_at: dt.datetime
+    version_logs_count: int
+    """
+    The number of logs that have been generated for this Tool Version
+    """
+
+    total_logs_count: int
+    """
+    The number of logs that have been generated across all Tool Versions
+    """
+
+    inputs: typing.Sequence[InputResponseParams]
+    """
+    Inputs associated to the Tool. Inputs correspond to any of the variables used within the Tool template.
+    """
+
+    evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]]
+    """
+    Evaluators that have been attached to this Tool that are used for monitoring logs.
+    """
+
+    signature: typing_extensions.NotRequired[str]
+    """
+    Signature of the Tool.
+    """
+
+    evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]]
+    """
+    Aggregation of Evaluator results for the Tool Version.
+ """ diff --git a/src/humanloop/requests/update_version_request.py b/src/humanloop/requests/update_version_request.py new file mode 100644 index 00000000..204b3b37 --- /dev/null +++ b/src/humanloop/requests/update_version_request.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class UpdateVersionRequestParams(typing_extensions.TypedDict): + name: typing_extensions.NotRequired[str] + """ + Name of the version. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the version. + """ diff --git a/src/humanloop/requests/validation_error.py b/src/humanloop/requests/validation_error.py new file mode 100644 index 00000000..fba151d8 --- /dev/null +++ b/src/humanloop/requests/validation_error.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .validation_error_loc_item import ValidationErrorLocItemParams + + +class ValidationErrorParams(typing_extensions.TypedDict): + loc: typing.Sequence[ValidationErrorLocItemParams] + msg: str + type: str diff --git a/src/humanloop/requests/validation_error_loc_item.py b/src/humanloop/requests/validation_error_loc_item.py new file mode 100644 index 00000000..b6ab5a3d --- /dev/null +++ b/src/humanloop/requests/validation_error_loc_item.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ValidationErrorLocItemParams = typing.Union[str, int] diff --git a/src/humanloop/requests/version_deployment_response.py b/src/humanloop/requests/version_deployment_response.py new file mode 100644 index 00000000..fdd17544 --- /dev/null +++ b/src/humanloop/requests/version_deployment_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import typing_extensions +from .environment_response import EnvironmentResponseParams + +if typing.TYPE_CHECKING: + from .version_deployment_response_file import VersionDeploymentResponseFileParams + + +class VersionDeploymentResponseParams(typing_extensions.TypedDict): + """ + A variable reference to the Version deployed to an Environment + """ + + file: "VersionDeploymentResponseFileParams" + """ + The File that the deployed Version belongs to. + """ + + environment: EnvironmentResponseParams + """ + The Environment that the Version is deployed to. + """ + + type: typing.Literal["environment"] diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py new file mode 100644 index 00000000..74e04ab8 --- /dev/null +++ b/src/humanloop/requests/version_deployment_response_file.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
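+#
+# The union below is declared with string forward references under
+# typing.TYPE_CHECKING to break import cycles with the response modules that
+# refer back to deployments. TypedDicts carry no runtime class, so narrowing
+# is done by inspecting fields. A sketch, assuming every variant carries the
+# common `path` field and (where defined) a `type` literal such as "prompt"
+# or "tool":
+#
+#   def describe(file: "VersionDeploymentResponseFileParams") -> str:
+#       return f"{file.get('type', 'file')}: {file['path']}"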
+ +from __future__ import annotations + +import typing + +from .dataset_response import DatasetResponseParams + +if typing.TYPE_CHECKING: + from .agent_response import AgentResponseParams + from .evaluator_response import EvaluatorResponseParams + from .flow_response import FlowResponseParams + from .prompt_response import PromptResponseParams + from .tool_response import ToolResponseParams +VersionDeploymentResponseFileParams = typing.Union[ + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", +] diff --git a/src/humanloop/requests/version_id.py b/src/humanloop/requests/version_id.py new file mode 100644 index 00000000..102b6b10 --- /dev/null +++ b/src/humanloop/requests/version_id.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class VersionIdParams(typing_extensions.TypedDict): + version_id: str + """ + Unique identifier for the Version. + """ diff --git a/src/humanloop/requests/version_id_response.py b/src/humanloop/requests/version_id_response.py new file mode 100644 index 00000000..af4d3226 --- /dev/null +++ b/src/humanloop/requests/version_id_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import typing_extensions + +if typing.TYPE_CHECKING: + from .version_id_response_version import VersionIdResponseVersionParams + + +class VersionIdResponseParams(typing_extensions.TypedDict): + """ + A reference to a specific Version by its ID + """ + + version: "VersionIdResponseVersionParams" + """ + The specific Version being referenced. + """ + + type: typing.Literal["version"] diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py new file mode 100644 index 00000000..ac1f96e2 --- /dev/null +++ b/src/humanloop/requests/version_id_response_version.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from .dataset_response import DatasetResponseParams + +if typing.TYPE_CHECKING: + from .agent_response import AgentResponseParams + from .evaluator_response import EvaluatorResponseParams + from .flow_response import FlowResponseParams + from .prompt_response import PromptResponseParams + from .tool_response import ToolResponseParams +VersionIdResponseVersionParams = typing.Union[ + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", +] diff --git a/src/humanloop/requests/version_reference_response.py b/src/humanloop/requests/version_reference_response.py new file mode 100644 index 00000000..4b80e4cd --- /dev/null +++ b/src/humanloop/requests/version_reference_response.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. 
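+#
+# Both variants in the union below carry a discriminating `type` literal
+# ("environment" for VersionDeploymentResponseParams, "version" for
+# VersionIdResponseParams), so runtime narrowing can branch on it. A minimal
+# sketch:
+#
+#   def referenced_environment(ref: "VersionReferenceResponseParams"):
+#       if ref["type"] == "environment":
+#           return ref["environment"]
+#       return None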
+ +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from .version_deployment_response import VersionDeploymentResponseParams + from .version_id_response import VersionIdResponseParams +VersionReferenceResponseParams = typing.Union["VersionDeploymentResponseParams", "VersionIdResponseParams"] diff --git a/src/humanloop/requests/version_stats_response.py b/src/humanloop/requests/version_stats_response.py new file mode 100644 index 00000000..1bb18233 --- /dev/null +++ b/src/humanloop/requests/version_stats_response.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams + + +class VersionStatsResponseParams(typing_extensions.TypedDict): + version_id: str + """ + Unique identifier for the evaluated Version. + """ + + batch_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + num_logs: int + """ + The total number of existing Logs in this Run. + """ + + evaluator_version_stats: typing.Sequence[VersionStatsResponseEvaluatorVersionStatsItemParams] + """ + Stats for each Evaluator Version applied to this Run. + """ diff --git a/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py b/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py new file mode 100644 index 00000000..2bbeb15c --- /dev/null +++ b/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams +from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams +from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams +from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams + +VersionStatsResponseEvaluatorVersionStatsItemParams = typing.Union[ + NumericEvaluatorStatsResponseParams, + BooleanEvaluatorStatsResponseParams, + SelectEvaluatorStatsResponseParams, + TextEvaluatorStatsResponseParams, +] diff --git a/src/humanloop/tools/__init__.py b/src/humanloop/tools/__init__.py new file mode 100644 index 00000000..5cde0202 --- /dev/null +++ b/src/humanloop/tools/__init__.py @@ -0,0 +1,4 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py new file mode 100644 index 00000000..d8449a7c --- /dev/null +++ b/src/humanloop/tools/client.py @@ -0,0 +1,2101 @@ +# This file was auto-generated by Fern from our API Definition. 
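+#
+# Typical usage goes through the top-level Humanloop client rather than
+# constructing ToolsClient directly, e.g. (illustrative values, mirroring the
+# Examples in the docstrings below):
+#
+#   from humanloop import Humanloop
+#
+#   client = Humanloop(api_key="YOUR_API_KEY")
+#   tool = client.tools.get(id="tl_789ghi")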
+ +import datetime as dt +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.pagination import AsyncPager, SyncPager +from ..core.request_options import RequestOptions +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams +from ..requests.tool_function import ToolFunctionParams +from ..requests.tool_kernel_request import ToolKernelRequestParams +from ..types.create_tool_log_response import CreateToolLogResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_environment_variable_request import FileEnvironmentVariableRequest +from ..types.file_sort_by import FileSortBy +from ..types.files_tool_type import FilesToolType +from ..types.list_tools import ListTools +from ..types.log_response import LogResponse +from ..types.log_status import LogStatus +from ..types.sort_order import SortOrder +from ..types.tool_call_response import ToolCallResponse +from ..types.tool_response import ToolResponse +from .raw_client import AsyncRawToolsClient, RawToolsClient + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class ToolsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawToolsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawToolsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawToolsClient + """ + return self._raw_client + + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolCallResponse: + """ + Call a Tool. + + Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. 
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to call.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to call.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        tool_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
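+
+        A more fully-specified call might look like the following (illustrative
+        values; the `math-tool` definition mirrors the log() example below):
+
+            client.tools.call(path='math-tool', inputs={'a': 5, 'b': 7})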
+
+        Returns
+        -------
+        ToolCallResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.call()
+        """
+        _response = self._raw_client.call(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            tool=tool,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            tool_call_request_environment=tool_call_request_environment,
+            save=save,
+            log_id=log_id,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CreateToolLogResponse:
+        """
+        Log to a Tool.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        tool_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CreateToolLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.log(
+            path='math-tool',
+            tool={
+                'function': {
+                    'name': 'multiply',
+                    'description': 'Multiply two numbers',
+                    'parameters': {
+                        'type': 'object',
+                        'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}},
+                        'required': ['a', 'b'],
+                    },
+                }
+            },
+            inputs={'a': 5, 'b': 7},
+            output='35',
+        )
+        """
+        _response = self._raw_client.log(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            tool=tool,
+            start_time=start_time,
+            end_time=end_time,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            tool_log_request_environment=tool_log_request_environment,
+            save=save,
+            log_id=log_id,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def update(
+        self,
+        id: str,
+        log_id: str,
+        *,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LogResponse:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.update(id='id', log_id='log_id', )
+        """
+        _response = self._raw_client.update(
+            id,
+            log_id,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[FileSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SyncPager[ToolResponse]:
+        """
+        Get a list of all Tools.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page offset for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Tools to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Tool name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Tools by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        SyncPager[ToolResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        response = client.tools.list(size=1, )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+        """
+        return self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    def upsert(
+        self,
+        *,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        function: typing.Optional[ToolFunctionParams] = OMIT,
+        source_code: typing.Optional[str] = OMIT,
+        setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        tool_type: typing.Optional[FilesToolType] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolResponse:
+        """
+        Create a Tool or update it with a new version if it already exists.
+
+        Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+
+        Version names must be unique within a Tool - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        function : typing.Optional[ToolFunctionParams]
+            Callable function specification of the Tool shown to the model for tool calling.
+
+        source_code : typing.Optional[str]
+            Code source of the Tool.
+
+        setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+        tool_type : typing.Optional[FilesToolType]
+            Type of Tool.
+
+        version_name : typing.Optional[str]
+            Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.upsert(
+            path='math-tool',
+            function={'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}, 'required': ['a', 'b']}},
+            version_name='math-tool-v1',
+            version_description='Simple math tool that multiplies two numbers',
+        )
+        """
+        _response = self._raw_client.upsert(
+            path=path,
+            id=id,
+            function=function,
+            source_code=source_code,
+            setup_values=setup_values,
+            attributes=attributes,
+            tool_type=tool_type,
+            version_name=version_name,
+            version_description=version_description,
+            request_options=request_options,
+        )
+        return _response.data
+
+    def get(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolResponse:
+        """
+        Retrieve the Tool with the given ID.
+
+        By default, the deployed version of the Tool is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.get(id='tl_789ghi', )
+        """
+        _response = self._raw_client.get(
+            id, version_id=version_id, environment=environment, request_options=request_options
+        )
+        return _response.data
+
+    def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+        """
+        Delete the Tool with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.delete(id='tl_789ghi', ) + """ + _response = self._raw_client.delete(id, request_options=request_options) + return _response.data + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: + """ + Move the Tool to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + path : typing.Optional[str] + Path of the Tool including the Tool name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Tool, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ToolResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.move(id='tl_789ghi', path='new directory/new name', ) + """ + _response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListTools: + """ + Get a list of all the versions of a Tool. + + Parameters + ---------- + id : str + Unique identifier for the Tool. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListTools + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.list_versions(id='tl_789ghi', ) + """ + _response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.delete_tool_version(id='id', version_id='version_id', ) + """ + _response = self._raw_client.delete_tool_version(id, version_id, request_options=request_options) + return _response.data + + def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: + """ + Update the name or description of the Tool version. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. 
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.update_tool_version(id='id', version_id='version_id', )
+        """
+        _response = self._raw_client.update_tool_version(
+            id, version_id, name=name, description=description, request_options=request_options
+        )
+        return _response.data
+
+    def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> ToolResponse:
+        """
+        Deploy Tool to an Environment.
+
+        Set the deployed version for the specified Environment. This Tool
+        will be used for calls made to the Tool in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', )
+        """
+        _response = self._raw_client.set_deployment(
+            id, environment_id, version_id=version_id, request_options=request_options
+        )
+        return _response.data
+
+    def remove_deployment(
+        self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Remove deployed Tool from the Environment.
+
+        Remove the deployed version for the specified Environment. This Tool
+        will no longer be used for calls made to the Tool in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        environment_id : str
+            Unique identifier for the Environment to remove the deployment from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import Humanloop
+        client = Humanloop(api_key="YOUR_API_KEY", )
+        client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', )
+        """
+        _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+        return _response.data
+
+    def list_environments(
+        self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> typing.List[FileEnvironmentResponse]:
+        """
+        List all Environments and their deployed versions for the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
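+
+        Notes
+        -----
+        A sketch of a full deployment round-trip using this method together
+        with `set_deployment` and `remove_deployment`. The IDs are
+        placeholders, and the `name` attribute on FileEnvironmentResponse is
+        an assumption made for this sketch:
+
+            from humanloop import Humanloop
+            client = Humanloop(api_key="YOUR_API_KEY")
+            # Deploy a specific version, inspect deployments, then undeploy.
+            client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl')
+            for env in client.tools.list_environments(id='tl_789ghi'):
+                print(env.name)
+            client.tools.remove_deployment(id='tl_789ghi', environment_id='staging')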
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.list_environments(id='tl_789ghi', ) + """ + _response = self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: + """ + Activate and deactivate Evaluators for monitoring the Tool. + + An activated Evaluator will automatically be run on all new Logs + within the Tool for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ToolResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) + """ + _response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.get_environment_variables(id='id', ) + """ + _response = self._raw_client.get_environment_variables(id, request_options=request_options) + return _response.data + + def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
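+
+        Notes
+        -----
+        A sketch of the environment-variable lifecycle around this method;
+        the variable name and value are placeholders:
+
+            from humanloop import Humanloop
+            client = Humanloop(api_key="YOUR_API_KEY")
+            # Add, list, then remove a variable on the Tool.
+            client.tools.add_environment_variable(id='tl_789ghi', request=[{'name': 'API_BASE', 'value': 'https://example.com'}])
+            variables = client.tools.get_environment_variables(id='tl_789ghi')
+            client.tools.delete_environment_variable(id='tl_789ghi', name='API_BASE')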
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], ) + """ + _response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options) + return _response.data + + def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + client = Humanloop(api_key="YOUR_API_KEY", ) + client.tools.delete_environment_variable(id='id', name='name', ) + """ + _response = self._raw_client.delete_environment_variable(id, name, request_options=request_options) + return _response.data + + +class AsyncToolsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawToolsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawToolsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawToolsClient + """ + return self._raw_client + + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolCallResponse: + """ + Call a Tool. + + Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to call. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to call. + + path : typing.Optional[str] + Path of the Tool, including the name. 
This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        tool_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
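+
+        Notes
+        -----
+        A sketch of targeting the Tool by inline details instead of
+        `version_id`/`environment`; if the kernel below does not match an
+        existing version, a new version is created. The kernel shape is
+        borrowed from the log example and may omit fields your Tool needs:
+
+            from humanloop import AsyncHumanloop
+            import asyncio
+            client = AsyncHumanloop(api_key="YOUR_API_KEY")
+            async def main() -> None:
+                await client.tools.call(
+                    path='math-tool',
+                    tool={'function': {'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}, 'required': ['a', 'b']}}},
+                    inputs={'a': 5, 'b': 7},
+                )
+            asyncio.run(main())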
+
+        Returns
+        -------
+        ToolCallResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.call()
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.call(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            tool=tool,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            tool_call_request_environment=tool_call_request_environment,
+            save=save,
+            log_id=log_id,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def log(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CreateToolLogResponse:
+        """
+        Log to a Tool.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool; if not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs.
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        tool_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
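+
+        Notes
+        -----
+        A sketch of nesting a Log in a Trace via `trace_parent_id`, to be run
+        inside an async function with an AsyncHumanloop `client`. It assumes
+        a Tool exists at path 'math-tool' and that the parent Log's ID is
+        exposed as `id` on the response (an assumption for this sketch):
+
+            # Create the parent first, then nest a child Log under it.
+            parent = await client.tools.log(path='math-tool', log_status='incomplete')
+            child = await client.tools.log(path='math-tool', output='35', trace_parent_id=parent.id)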
+
+        Returns
+        -------
+        CreateToolLogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.log(
+                path='math-tool',
+                tool={'function': {'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}, 'required': ['a', 'b']}}},
+                inputs={'a': 5, 'b': 7},
+                output='35',
+            )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.log(
+            version_id=version_id,
+            environment=environment,
+            path=path,
+            id=id,
+            tool=tool,
+            start_time=start_time,
+            end_time=end_time,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            log_status=log_status,
+            source_datapoint_id=source_datapoint_id,
+            trace_parent_id=trace_parent_id,
+            user=user,
+            tool_log_request_environment=tool_log_request_environment,
+            save=save,
+            log_id=log_id,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def update(
+        self,
+        id: str,
+        log_id: str,
+        *,
+        output: typing.Optional[str] = OMIT,
+        created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LogResponse:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for the Tool.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LogResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.update(id='id', log_id='log_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.update(
+            id,
+            log_id,
+            output=output,
+            created_at=created_at,
+            error=error,
+            provider_latency=provider_latency,
+            stdout=stdout,
+            provider_request=provider_request,
+            provider_response=provider_response,
+            inputs=inputs,
+            source=source,
+            metadata=metadata,
+            start_time=start_time,
+            end_time=end_time,
+            log_status=log_status,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[FileSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncPager[ToolResponse]:
+        """
+        Get a list of all Tools.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page offset for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Tools to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Tool name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[FileSortBy]
+            Field to sort Tools by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
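+
+        Notes
+        -----
+        A sketch of draining the pager into a plain list with an async
+        comprehension, to be run inside an async function with an
+        AsyncHumanloop `client`; `size` only controls the per-page fetch size:
+
+            # Iterating the pager transparently fetches subsequent pages.
+            tools = [tool async for tool in await client.tools.list(size=50)]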
+
+        Returns
+        -------
+        AsyncPager[ToolResponse]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            response = await client.tools.list(size=1, )
+            async for item in response:
+                print(item)
+
+            # alternatively, you can paginate page-by-page
+            async for page in response.iter_pages():
+                print(page)
+        asyncio.run(main())
+        """
+        return await self._raw_client.list(
+            page=page,
+            size=size,
+            name=name,
+            user_filter=user_filter,
+            sort_by=sort_by,
+            order=order,
+            request_options=request_options,
+        )
+
+    async def upsert(
+        self,
+        *,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        function: typing.Optional[ToolFunctionParams] = OMIT,
+        source_code: typing.Optional[str] = OMIT,
+        setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        tool_type: typing.Optional[FilesToolType] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolResponse:
+        """
+        Create a Tool or update it with a new version if it already exists.
+
+        Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Tool - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        function : typing.Optional[ToolFunctionParams]
+            Callable function specification of the Tool shown to the model for tool calling.
+
+        source_code : typing.Optional[str]
+            Code source of the Tool.
+
+        setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+        tool_type : typing.Optional[FilesToolType]
+            Type of Tool.
+
+        version_name : typing.Optional[str]
+            Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
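+
+        Notes
+        -----
+        A sketch of handling the 409 Conflict returned for a duplicate
+        `version_name`, to be run inside an async function with an
+        AsyncHumanloop `client`. It assumes the error surfaces as the SDK's
+        ApiError with a `status_code` attribute, and trims the function spec
+        for brevity:
+
+            from humanloop.core.api_error import ApiError
+            try:
+                await client.tools.upsert(path='math-tool', function={'name': 'multiply'}, version_name='math-tool-v1')
+            except ApiError as err:
+                if err.status_code == 409:
+                    print('version name already taken; choose a new name')
+                else:
+                    raise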
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.upsert(
+                path='math-tool',
+                function={'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}, 'required': ['a', 'b']}},
+                version_name='math-tool-v1',
+                version_description='Simple math tool that multiplies two numbers',
+            )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.upsert(
+            path=path,
+            id=id,
+            function=function,
+            source_code=source_code,
+            setup_values=setup_values,
+            attributes=attributes,
+            tool_type=tool_type,
+            version_name=version_name,
+            version_description=version_description,
+            request_options=request_options,
+        )
+        return _response.data
+
+    async def get(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolResponse:
+        """
+        Retrieve the Tool with the given ID.
+
+        By default, the deployed version of the Tool is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.get(id='tl_789ghi', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.get(
+            id, version_id=version_id, environment=environment, request_options=request_options
+        )
+        return _response.data
+
+    async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+        """
+        Delete the Tool with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.delete(id='tl_789ghi', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.delete(id, request_options=request_options)
+        return _response.data
+
+    async def move(
+        self,
+        id: str,
+        *,
+        path: typing.Optional[str] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolResponse:
+        """
+        Move the Tool to a different path or change the name.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        path : typing.Optional[str]
+            Path of the Tool including the Tool name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Tool, which is used as a unique identifier.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + ToolResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.move(id='tl_789ghi', path='new directory/new name', ) + asyncio.run(main()) + """ + _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return _response.data + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListTools: + """ + Get a list of all the versions of a Tool. + + Parameters + ---------- + id : str + Unique identifier for the Tool. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListTools + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.list_versions(id='tl_789ghi', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return _response.data + + async def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.delete_tool_version(id='id', version_id='version_id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete_tool_version(id, version_id, request_options=request_options) + return _response.data + + async def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: + """ + Update the name or description of the Tool version. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.update_tool_version(id='id', version_id='version_id', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.update_tool_version(
+            id, version_id, name=name, description=description, request_options=request_options
+        )
+        return _response.data
+
+    async def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> ToolResponse:
+        """
+        Deploy Tool to an Environment.
+
+        Set the deployed version for the specified Environment. This Tool
+        will be used for calls made to the Tool in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ToolResponse
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.set_deployment(
+            id, environment_id, version_id=version_id, request_options=request_options
+        )
+        return _response.data
+
+    async def remove_deployment(
+        self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> None:
+        """
+        Remove deployed Tool from the Environment.
+
+        Remove the deployed version for the specified Environment. This Tool
+        will no longer be used for calls made to the Tool in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        environment_id : str
+            Unique identifier for the Environment to remove the deployment from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from humanloop import AsyncHumanloop
+        import asyncio
+        client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+        async def main() -> None:
+            await client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', )
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+        return _response.data
+
+    async def list_environments(
+        self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> typing.List[FileEnvironmentResponse]:
+        """
+        List all Environments and their deployed versions for the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.list_environments(id='tl_789ghi', ) + asyncio.run(main()) + """ + _response = await self._raw_client.list_environments(id, request_options=request_options) + return _response.data + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: + """ + Activate and deactivate Evaluators for monitoring the Tool. + + An activated Evaluator will automatically be run on all new Logs + within the Tool for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ToolResponse + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return _response.data + + async def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.get_environment_variables(id='id', ) + asyncio.run(main()) + """ + _response = await self._raw_client.get_environment_variables(id, request_options=request_options) + return _response.data + + async def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], ) + asyncio.run(main()) + """ + _response = await self._raw_client.add_environment_variable( + id, request=request, request_options=request_options + ) + return _response.data + + async def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import AsyncHumanloop + import asyncio + client = AsyncHumanloop(api_key="YOUR_API_KEY", ) + async def main() -> None: + await client.tools.delete_environment_variable(id='id', name='name', ) + asyncio.run(main()) + """ + _response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options) + return _response.data diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py new file mode 100644 index 00000000..85bbef9e --- /dev/null +++ b/src/humanloop/tools/raw_client.py @@ -0,0 +1,2917 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing +from json.decoder import JSONDecodeError + +from ..core.api_error import ApiError +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager +from ..core.request_options import RequestOptions +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams +from ..requests.tool_function import ToolFunctionParams +from ..requests.tool_kernel_request import ToolKernelRequestParams +from ..types.create_tool_log_response import CreateToolLogResponse +from ..types.file_environment_response import FileEnvironmentResponse +from ..types.file_environment_variable_request import FileEnvironmentVariableRequest +from ..types.file_sort_by import FileSortBy +from ..types.files_tool_type import FilesToolType +from ..types.http_validation_error import HttpValidationError +from ..types.list_tools import ListTools +from ..types.log_response import LogResponse +from ..types.log_status import LogStatus +from ..types.paginated_data_tool_response import PaginatedDataToolResponse +from ..types.sort_order import SortOrder +from ..types.tool_call_response import ToolCallResponse +from ..types.tool_response 
import ToolResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawToolsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[ToolCallResponse]:
+        """
+        Call a Tool.
+
+        Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to call.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to call.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations.
If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ToolCallResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "tools/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_call_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolCallResponse, + construct_type( + type_=ToolCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_log_request_environment: typing.Optional[str] = OMIT, + save: 
typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateToolLogResponse]: + """ + Log to a Tool. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool, if not we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. 
+ + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreateToolLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "tools/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateToolLogResponse, + construct_type( + type_=CreateToolLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update( + self, + id: str, + log_id: str, + *, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + log_id : str + Unique identifier for the Log. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. 
+ + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[LogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> SyncPager[ToolResponse]: + """ + Get a list of all Tools. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Tools to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Tool name. 
+ + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users. + + sort_by : typing.Optional[FileSortBy] + Field to sort Tools by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SyncPager[ToolResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = self._client_wrapper.httpx_client.request( + "tools", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataToolResponse, + construct_type( + type_=PaginatedDataToolResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + _get_next = lambda: self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + return SyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def upsert( + self, + *, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + function: typing.Optional[ToolFunctionParams] = OMIT, + source_code: typing.Optional[str] = OMIT, + setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tool_type: typing.Optional[FilesToolType] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Create a Tool or update it with a new version if it already exists. + + Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Tool - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + function : typing.Optional[ToolFunctionParams] + Callable function specification of the Tool shown to the model for tool calling. + + source_code : typing.Optional[str] + Code source of the Tool. 
+ + setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. + + tool_type : typing.Optional[FilesToolType] + Type of Tool. + + version_name : typing.Optional[str] + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "tools", + method="POST", + json={ + "path": path, + "id": id, + "function": convert_and_respect_annotation_metadata( + object_=function, annotation=ToolFunctionParams, direction="write" + ), + "source_code": source_code, + "setup_values": setup_values, + "attributes": attributes, + "tool_type": tool_type, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Retrieve the Tool with the given ID. + + By default, the deployed version of the Tool is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : typing.Optional[str] + A specific Version ID of the Tool to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Tool with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Move the Tool to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + path : typing.Optional[str] + Path of the Tool including the Tool name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Tool, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListTools]: + """ + Get a list of all the versions of a Tool. + + Parameters + ---------- + id : str + Unique identifier for the Tool. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListTools] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListTools, + construct_type( + type_=ListTools, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Update the name or description of the Tool version. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[ToolResponse]: + """ + Deploy Tool to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Tool in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Tool from the Environment. + + Remove the deployed version for the specified Environment. This Tool + will no longer be used for calls made to the Tool in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Activate and deactivate Evaluators for monitoring the Tool. + + An activated Evaluator will automatically be run on all new Logs + within the Tool for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" + ), + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawToolsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + 
log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolCallResponse]: + """ + Call a Tool. + + Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to call. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to call. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[ToolCallResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "tools/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_call_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolCallResponse, + construct_type( + type_=ToolCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateToolLogResponse]: + """ + Log to a Tool. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool, if not we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. 
+ + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[CreateToolLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "tools/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateToolLogResponse, + construct_type( + type_=CreateToolLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update( + self, + id: str, + log_id: str, + *, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + log_id : str + Unique identifier for the Log. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. 
+ + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[LogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[FileSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncPager[ToolResponse]: + """ + Get a list of all Tools. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Tools to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Tool name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users. 
+ + sort_by : typing.Optional[FileSortBy] + Field to sort Tools by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncPager[ToolResponse] + Successful Response + """ + page = page if page is not None else 1 + + _response = await self._client_wrapper.httpx_client.request( + "tools", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _parsed_response = typing.cast( + PaginatedDataToolResponse, + construct_type( + type_=PaginatedDataToolResponse, # type: ignore + object_=_response.json(), + ), + ) + _items = _parsed_response.records + _has_next = True + + async def _get_next(): + return await self.list( + page=page + 1, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + + return AsyncPager( + has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def upsert( + self, + *, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + function: typing.Optional[ToolFunctionParams] = OMIT, + source_code: typing.Optional[str] = OMIT, + setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tool_type: typing.Optional[FilesToolType] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Create a Tool or update it with a new version if it already exists. + + Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Tool - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + function : typing.Optional[ToolFunctionParams] + Callable function specification of the Tool shown to the model for tool calling. + + source_code : typing.Optional[str] + Code source of the Tool. 
+
+        setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+        tool_type : typing.Optional[FilesToolType]
+            Type of Tool.
+
+        version_name : typing.Optional[str]
+            Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[ToolResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "tools",
+            method="POST",
+            json={
+                "path": path,
+                "id": id,
+                "function": convert_and_respect_annotation_metadata(
+                    object_=function, annotation=ToolFunctionParams, direction="write"
+                ),
+                "source_code": source_code,
+                "setup_values": setup_values,
+                "attributes": attributes,
+                "tool_type": tool_type,
+                "version_name": version_name,
+                "version_description": version_description,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ToolResponse,
+                    construct_type(
+                        type_=ToolResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def get(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ToolResponse]:
+        """
+        Retrieve the Tool with the given ID.
+
+        By default, the deployed version of the Tool is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
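+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client, and the
+        Tool ID is a placeholder.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            response = await tools.get(id="tl_1234")
+            tool = response.data  # parsed ToolResponse
+            print(tool)
+
+        asyncio.run(main())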
+ + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Tool with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Move the Tool to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + path : typing.Optional[str] + Path of the Tool including the Tool name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Tool, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
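+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client; the ID and
+        the target path are placeholders.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            # Move/rename the Tool by giving it a new path.
+            response = await tools.move(id="tl_1234", path="new-folder/my-tool")
+            print(response.data)  # ToolResponse with the updated path
+
+        asyncio.run(main())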
+
+        Returns
+        -------
+        AsyncHttpResponse[ToolResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}",
+            method="PATCH",
+            json={
+                "path": path,
+                "name": name,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ToolResponse,
+                    construct_type(
+                        type_=ToolResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def list_versions(
+        self,
+        id: str,
+        *,
+        evaluator_aggregates: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ListTools]:
+        """
+        Get a list of all the versions of a Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for the Tool.
+
+        evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[ListTools]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/versions",
+            method="GET",
+            params={
+                "evaluator_aggregates": evaluator_aggregates,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ListTools,
+                    construct_type(
+                        type_=ListTools,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def delete_tool_version(
+        self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[None]:
+        """
+        Delete a version of the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        version_id : str
+            Unique identifier for the specific version of the Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
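+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client; both IDs
+        are placeholders.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            # Returns no body on success (AsyncHttpResponse[None]).
+            await tools.delete_tool_version(id="tl_1234", version_id="tv_1234")
+
+        asyncio.run(main())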
+
+        Returns
+        -------
+        AsyncHttpResponse[None]
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def update_tool_version(
+        self,
+        id: str,
+        version_id: str,
+        *,
+        name: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ToolResponse]:
+        """
+        Update the name or description of the Tool version.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        version_id : str
+            Unique identifier for the specific version of the Tool.
+
+        name : typing.Optional[str]
+            Name of the version.
+
+        description : typing.Optional[str]
+            Description of the version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[ToolResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+            method="PATCH",
+            json={
+                "name": name,
+                "description": description,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ToolResponse,
+                    construct_type(
+                        type_=ToolResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[ToolResponse]:
+        """
+        Deploy Tool to an Environment.
+
+        Set the deployed version for the specified Environment. This version
+        will be used for calls made to the Tool in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
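+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client; all IDs
+        are placeholders.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            # Pin version tv_1234 as the deployed version in Environment env_1234.
+            response = await tools.set_deployment(
+                id="tl_1234",
+                environment_id="env_1234",
+                version_id="tv_1234",
+            )
+            print(response.data)  # ToolResponse for the deployed version
+
+        asyncio.run(main())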
+
+        Returns
+        -------
+        AsyncHttpResponse[ToolResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+            method="POST",
+            params={
+                "version_id": version_id,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ToolResponse,
+                    construct_type(
+                        type_=ToolResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def remove_deployment(
+        self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[None]:
+        """
+        Remove deployed Tool from the Environment.
+
+        Remove the deployed version for the specified Environment. This version
+        will no longer be used for calls made to the Tool in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        environment_id : str
+            Unique identifier for the Environment to remove the deployment from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[None]
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    headers=dict(_response.headers),
+                    body=typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+    async def list_environments(
+        self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]:
+        """
+        List all Environments and their deployed versions for the Tool.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Tool.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
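+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client; the ID is
+        a placeholder.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            response = await tools.list_environments(id="tl_1234")
+            for deployment in response.data:  # list of FileEnvironmentResponse
+                print(deployment)
+
+        asyncio.run(main())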
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Activate and deactivate Evaluators for monitoring the Tool. + + An activated Evaluator will automatically be run on all new Logs + within the Tool for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
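+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client. The IDs
+        are placeholders, and the `{"evaluator_version_id": ...}` item shape is
+        an assumption about the activate params type, not a confirmed schema.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            # The activate item shape below is an assumed example, not a confirmed schema.
+            response = await tools.update_monitoring(
+                id="tl_1234",
+                activate=[{"evaluator_version_id": "evv_1234"}],
+            )
+            print(response.data)  # updated ToolResponse
+
+        asyncio.run(main())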
+ + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
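+
+        Examples
+        --------
+        A minimal sketch, not taken from the API definition: it assumes `tools`
+        is an already-constructed instance of this async raw client. The ID is
+        a placeholder, and the `name`/`value` keys are an assumption about the
+        FileEnvironmentVariableRequestParams shape, not a confirmed schema.
+
+        import asyncio
+
+        async def main() -> None:
+            # `tools` is assumed to be an instance of this raw client, built elsewhere.
+            # The request item shape below is an assumed example, not a confirmed schema.
+            response = await tools.add_environment_variable(
+                id="tl_1234",
+                request=[{"name": "MY_API_KEY", "value": "secret"}],
+            )
+            print(response.data)  # updated list of environment variables
+
+        asyncio.run(main())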
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" + ), + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + headers=dict(_response.headers), + body=typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py new file mode 100644 index 00000000..5662ea6d --- /dev/null +++ b/src/humanloop/types/__init__.py @@ -0,0 +1,411 @@ +# This file was auto-generated by Fern from our API Definition. 
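+# It re-exports the SDK's public types so that consumers can write, e.g.,
+# `from humanloop.types import ToolResponse` instead of importing from the
+# individual submodules.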
+ +# isort: skip_file + +from .agent_call_response import AgentCallResponse +from .agent_call_response_tool_choice import AgentCallResponseToolChoice +from .agent_call_stream_response import AgentCallStreamResponse +from .agent_call_stream_response_payload import AgentCallStreamResponsePayload +from .agent_config_response import AgentConfigResponse +from .agent_continue_call_response import AgentContinueCallResponse +from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice +from .agent_continue_call_stream_response import AgentContinueCallStreamResponse +from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload +from .agent_inline_tool import AgentInlineTool +from .agent_kernel_request import AgentKernelRequest +from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort +from .agent_kernel_request_stop import AgentKernelRequestStop +from .agent_kernel_request_template import AgentKernelRequestTemplate +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem +from .agent_linked_file_request import AgentLinkedFileRequest +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_linked_file_response_file import AgentLinkedFileResponseFile +from .agent_log_response import AgentLogResponse +from .agent_log_response_tool_choice import AgentLogResponseToolChoice +from .agent_log_stream_response import AgentLogStreamResponse +from .agent_response import AgentResponse +from .agent_response_reasoning_effort import AgentResponseReasoningEffort +from .agent_response_stop import AgentResponseStop +from .agent_response_template import AgentResponseTemplate +from .agent_response_tools_item import AgentResponseToolsItem +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent +from .anthropic_thinking_content import AnthropicThinkingContent +from .base_models_user_response import BaseModelsUserResponse +from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse +from .chat_message import ChatMessage +from .chat_message_content import ChatMessageContent +from .chat_message_content_item import ChatMessageContentItem +from .chat_message_thinking_item import ChatMessageThinkingItem +from .chat_role import ChatRole +from .chat_tool_type import ChatToolType +from .code_evaluator_request import CodeEvaluatorRequest +from .config_tool_response import ConfigToolResponse +from .create_agent_log_response import CreateAgentLogResponse +from .create_datapoint_request import CreateDatapointRequest +from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue +from .create_evaluator_log_response import CreateEvaluatorLogResponse +from .create_flow_log_response import CreateFlowLogResponse +from .create_prompt_log_response import CreatePromptLogResponse +from .create_tool_log_response import CreateToolLogResponse +from .dashboard_configuration import DashboardConfiguration +from .datapoint_response import DatapointResponse +from .datapoint_response_target_value import DatapointResponseTargetValue +from .dataset_response import DatasetResponse +from .datasets_request import DatasetsRequest +from .directory_response import DirectoryResponse +from .directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse +from .directory_with_parents_and_children_response_files_item import DirectoryWithParentsAndChildrenResponseFilesItem +from .environment_response import EnvironmentResponse +from 
.environment_tag import EnvironmentTag +from .evaluatee_request import EvaluateeRequest +from .evaluatee_response import EvaluateeResponse +from .evaluation_evaluator_response import EvaluationEvaluatorResponse +from .evaluation_log_response import EvaluationLogResponse +from .evaluation_response import EvaluationResponse +from .evaluation_run_response import EvaluationRunResponse +from .evaluation_runs_response import EvaluationRunsResponse +from .evaluation_stats import EvaluationStats +from .evaluation_status import EvaluationStatus +from .evaluations_dataset_request import EvaluationsDatasetRequest +from .evaluations_request import EvaluationsRequest +from .evaluator_activation_deactivation_request import EvaluatorActivationDeactivationRequest +from .evaluator_activation_deactivation_request_activate_item import EvaluatorActivationDeactivationRequestActivateItem +from .evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItem, +) +from .evaluator_aggregate import EvaluatorAggregate +from .evaluator_arguments_type import EvaluatorArgumentsType +from .evaluator_config_response import EvaluatorConfigResponse +from .evaluator_file_id import EvaluatorFileId +from .evaluator_file_path import EvaluatorFilePath +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse +from .evaluator_log_response import EvaluatorLogResponse +from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment +from .evaluator_response import EvaluatorResponse +from .evaluator_response_spec import EvaluatorResponseSpec +from .evaluator_return_type_enum import EvaluatorReturnTypeEnum +from .evaluator_version_id import EvaluatorVersionId +from .evaluators_request import EvaluatorsRequest +from .event_type import EventType +from .external_evaluator_request import ExternalEvaluatorRequest +from .feedback_type import FeedbackType +from .file_environment_response import FileEnvironmentResponse +from .file_environment_response_file import FileEnvironmentResponseFile +from .file_environment_variable_request import FileEnvironmentVariableRequest +from .file_id import FileId +from .file_path import FilePath +from .file_request import FileRequest +from .file_sort_by import FileSortBy +from .file_type import FileType +from .files_tool_type import FilesToolType +from .flow_kernel_request import FlowKernelRequest +from .flow_log_response import FlowLogResponse +from .flow_response import FlowResponse +from .function_tool import FunctionTool +from .function_tool_choice import FunctionToolChoice +from .http_validation_error import HttpValidationError +from .human_evaluator_request import HumanEvaluatorRequest +from .human_evaluator_request_return_type import HumanEvaluatorRequestReturnType +from .image_chat_content import ImageChatContent +from .image_url import ImageUrl +from .image_url_detail import ImageUrlDetail +from .input_response import InputResponse +from .linked_file_request import LinkedFileRequest +from .linked_tool_response import LinkedToolResponse +from .list_agents import ListAgents +from .list_datasets import ListDatasets +from .list_evaluators import ListEvaluators +from .list_flows import ListFlows +from .list_prompts import ListPrompts +from .list_tools import ListTools +from .llm_evaluator_request import LlmEvaluatorRequest +from .log_response import LogResponse +from .log_status import LogStatus +from .log_stream_response import 
LogStreamResponse +from .model_endpoints import ModelEndpoints +from .model_providers import ModelProviders +from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .monitoring_evaluator_state import MonitoringEvaluatorState +from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest +from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse +from .observability_status import ObservabilityStatus +from .on_agent_call_enum import OnAgentCallEnum +from .open_ai_reasoning_effort import OpenAiReasoningEffort +from .overall_stats import OverallStats +from .paginated_data_agent_response import PaginatedDataAgentResponse +from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse +from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse +from .paginated_data_flow_response import PaginatedDataFlowResponse +from .paginated_data_log_response import PaginatedDataLogResponse +from .paginated_data_prompt_response import PaginatedDataPromptResponse +from .paginated_data_tool_response import PaginatedDataToolResponse +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, +) +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, +) +from .paginated_datapoint_response import PaginatedDatapointResponse +from .paginated_dataset_response import PaginatedDatasetResponse +from .paginated_evaluation_response import PaginatedEvaluationResponse +from .paginated_prompt_log_response import PaginatedPromptLogResponse +from .paginated_session_response import PaginatedSessionResponse +from .platform_access_enum import PlatformAccessEnum +from .populate_template_response import PopulateTemplateResponse +from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort +from .populate_template_response_stop import PopulateTemplateResponseStop +from .populate_template_response_template import PopulateTemplateResponseTemplate +from .prompt_call_log_response import PromptCallLogResponse +from .prompt_call_response import PromptCallResponse +from .prompt_call_response_tool_choice import PromptCallResponseToolChoice +from .prompt_call_stream_response import PromptCallStreamResponse +from .prompt_kernel_request import PromptKernelRequest +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort +from .prompt_kernel_request_stop import PromptKernelRequestStop +from .prompt_kernel_request_template import PromptKernelRequestTemplate +from .prompt_log_response import PromptLogResponse +from .prompt_log_response_tool_choice import PromptLogResponseToolChoice +from .prompt_response import PromptResponse +from .prompt_response_reasoning_effort import PromptResponseReasoningEffort +from .prompt_response_stop import PromptResponseStop +from .prompt_response_template import PromptResponseTemplate +from .provider_api_keys import ProviderApiKeys +from .response_format import ResponseFormat +from 
.response_format_type import ResponseFormatType +from .run_stats_response import RunStatsResponse +from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItem +from .run_version_response import RunVersionResponse +from .select_evaluator_stats_response import SelectEvaluatorStatsResponse +from .sort_order import SortOrder +from .template_language import TemplateLanguage +from .text_chat_content import TextChatContent +from .text_evaluator_stats_response import TextEvaluatorStatsResponse +from .time_unit import TimeUnit +from .tool_call import ToolCall +from .tool_call_response import ToolCallResponse +from .tool_choice import ToolChoice +from .tool_function import ToolFunction +from .tool_kernel_request import ToolKernelRequest +from .tool_log_response import ToolLogResponse +from .tool_response import ToolResponse +from .update_dateset_action import UpdateDatesetAction +from .update_evaluation_status_request import UpdateEvaluationStatusRequest +from .update_version_request import UpdateVersionRequest +from .user_response import UserResponse +from .valence import Valence +from .validation_error import ValidationError +from .validation_error_loc_item import ValidationErrorLocItem +from .version_deployment_response import VersionDeploymentResponse +from .version_deployment_response_file import VersionDeploymentResponseFile +from .version_id import VersionId +from .version_id_response import VersionIdResponse +from .version_id_response_version import VersionIdResponseVersion +from .version_reference_response import VersionReferenceResponse +from .version_stats_response import VersionStatsResponse +from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem +from .version_status import VersionStatus + +__all__ = [ + "AgentCallResponse", + "AgentCallResponseToolChoice", + "AgentCallStreamResponse", + "AgentCallStreamResponsePayload", + "AgentConfigResponse", + "AgentContinueCallResponse", + "AgentContinueCallResponseToolChoice", + "AgentContinueCallStreamResponse", + "AgentContinueCallStreamResponsePayload", + "AgentInlineTool", + "AgentKernelRequest", + "AgentKernelRequestReasoningEffort", + "AgentKernelRequestStop", + "AgentKernelRequestTemplate", + "AgentKernelRequestToolsItem", + "AgentLinkedFileRequest", + "AgentLinkedFileResponse", + "AgentLinkedFileResponseFile", + "AgentLogResponse", + "AgentLogResponseToolChoice", + "AgentLogStreamResponse", + "AgentResponse", + "AgentResponseReasoningEffort", + "AgentResponseStop", + "AgentResponseTemplate", + "AgentResponseToolsItem", + "AnthropicRedactedThinkingContent", + "AnthropicThinkingContent", + "BaseModelsUserResponse", + "BooleanEvaluatorStatsResponse", + "ChatMessage", + "ChatMessageContent", + "ChatMessageContentItem", + "ChatMessageThinkingItem", + "ChatRole", + "ChatToolType", + "CodeEvaluatorRequest", + "ConfigToolResponse", + "CreateAgentLogResponse", + "CreateDatapointRequest", + "CreateDatapointRequestTargetValue", + "CreateEvaluatorLogResponse", + "CreateFlowLogResponse", + "CreatePromptLogResponse", + "CreateToolLogResponse", + "DashboardConfiguration", + "DatapointResponse", + "DatapointResponseTargetValue", + "DatasetResponse", + "DatasetsRequest", + "DirectoryResponse", + "DirectoryWithParentsAndChildrenResponse", + "DirectoryWithParentsAndChildrenResponseFilesItem", + "EnvironmentResponse", + "EnvironmentTag", + "EvaluateeRequest", + "EvaluateeResponse", + "EvaluationEvaluatorResponse", + "EvaluationLogResponse", + "EvaluationResponse", + 
"EvaluationRunResponse", + "EvaluationRunsResponse", + "EvaluationStats", + "EvaluationStatus", + "EvaluationsDatasetRequest", + "EvaluationsRequest", + "EvaluatorActivationDeactivationRequest", + "EvaluatorActivationDeactivationRequestActivateItem", + "EvaluatorActivationDeactivationRequestDeactivateItem", + "EvaluatorAggregate", + "EvaluatorArgumentsType", + "EvaluatorConfigResponse", + "EvaluatorFileId", + "EvaluatorFilePath", + "EvaluatorJudgmentNumberLimit", + "EvaluatorJudgmentOptionResponse", + "EvaluatorLogResponse", + "EvaluatorLogResponseJudgment", + "EvaluatorResponse", + "EvaluatorResponseSpec", + "EvaluatorReturnTypeEnum", + "EvaluatorVersionId", + "EvaluatorsRequest", + "EventType", + "ExternalEvaluatorRequest", + "FeedbackType", + "FileEnvironmentResponse", + "FileEnvironmentResponseFile", + "FileEnvironmentVariableRequest", + "FileId", + "FilePath", + "FileRequest", + "FileSortBy", + "FileType", + "FilesToolType", + "FlowKernelRequest", + "FlowLogResponse", + "FlowResponse", + "FunctionTool", + "FunctionToolChoice", + "HttpValidationError", + "HumanEvaluatorRequest", + "HumanEvaluatorRequestReturnType", + "ImageChatContent", + "ImageUrl", + "ImageUrlDetail", + "InputResponse", + "LinkedFileRequest", + "LinkedToolResponse", + "ListAgents", + "ListDatasets", + "ListEvaluators", + "ListFlows", + "ListPrompts", + "ListTools", + "LlmEvaluatorRequest", + "LogResponse", + "LogStatus", + "LogStreamResponse", + "ModelEndpoints", + "ModelProviders", + "MonitoringEvaluatorEnvironmentRequest", + "MonitoringEvaluatorResponse", + "MonitoringEvaluatorState", + "MonitoringEvaluatorVersionRequest", + "NumericEvaluatorStatsResponse", + "ObservabilityStatus", + "OnAgentCallEnum", + "OpenAiReasoningEffort", + "OverallStats", + "PaginatedDataAgentResponse", + "PaginatedDataEvaluationLogResponse", + "PaginatedDataEvaluatorResponse", + "PaginatedDataFlowResponse", + "PaginatedDataLogResponse", + "PaginatedDataPromptResponse", + "PaginatedDataToolResponse", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", + "PaginatedDatapointResponse", + "PaginatedDatasetResponse", + "PaginatedEvaluationResponse", + "PaginatedPromptLogResponse", + "PaginatedSessionResponse", + "PlatformAccessEnum", + "PopulateTemplateResponse", + "PopulateTemplateResponsePopulatedTemplate", + "PopulateTemplateResponseReasoningEffort", + "PopulateTemplateResponseStop", + "PopulateTemplateResponseTemplate", + "PromptCallLogResponse", + "PromptCallResponse", + "PromptCallResponseToolChoice", + "PromptCallStreamResponse", + "PromptKernelRequest", + "PromptKernelRequestReasoningEffort", + "PromptKernelRequestStop", + "PromptKernelRequestTemplate", + "PromptLogResponse", + "PromptLogResponseToolChoice", + "PromptResponse", + "PromptResponseReasoningEffort", + "PromptResponseStop", + "PromptResponseTemplate", + "ProviderApiKeys", + "ResponseFormat", + "ResponseFormatType", + "RunStatsResponse", + "RunStatsResponseEvaluatorStatsItem", + "RunVersionResponse", + "SelectEvaluatorStatsResponse", + "SortOrder", + "TemplateLanguage", + "TextChatContent", + "TextEvaluatorStatsResponse", + "TimeUnit", + "ToolCall", + "ToolCallResponse", + "ToolChoice", + "ToolFunction", + "ToolKernelRequest", + "ToolLogResponse", + "ToolResponse", + "UpdateDatesetAction", + "UpdateEvaluationStatusRequest", + "UpdateVersionRequest", + "UserResponse", + "Valence", + "ValidationError", + 
"ValidationErrorLocItem", + "VersionDeploymentResponse", + "VersionDeploymentResponseFile", + "VersionId", + "VersionIdResponse", + "VersionIdResponseVersion", + "VersionReferenceResponse", + "VersionStatsResponse", + "VersionStatsResponseEvaluatorVersionStatsItem", + "VersionStatus", +] diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py new file mode 100644 index 00000000..9bf3bb70 --- /dev/null +++ b/src/humanloop/types/agent_call_response.py @@ -0,0 +1,231 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_call_response_tool_choice import AgentCallResponseToolChoice +from .chat_message import ChatMessage +from .log_status import LogStatus + + +class AgentCallResponse(UncheckedBaseModel): + """ + Response model for a Agent call. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None) + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + agent: "AgentResponse" = pydantic.Field() + """ + Agent that generated the Log. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. 
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+    """
+    Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+    """
+
+    source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+    """
+
+    user: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The name of the Environment the Log is associated with.
+    """
+
+    save: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+    """
+
+    id: str = pydantic.Field()
+    """
+    Unique identifier for the Log.
+    """
+
+    evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
+    """
+    List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+    """
+
+    trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifier for the Flow that the Trace belongs to.
+    """
+
+    trace_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifier for the Trace that the Log belongs to.
+    """
+
+    trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
+    """
+    Logs nested under this Log in the Trace.
+    """
+
+    previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+    """
+    The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402, F401, I001
+from .agent_response import AgentResponse  # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse  # noqa: E402, F401, I001
+from .flow_response import FlowResponse  # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402, F401, I001
+from .prompt_response import PromptResponse  # noqa: E402, F401, I001
+from .tool_response import ToolResponse  # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse  # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse  # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse  # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse  # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse  # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse  # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse  # noqa: E402, F401, I001
+from .log_response import LogResponse  # noqa: E402, F401, I001
+
+update_forward_refs(AgentCallResponse)
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..2d5a032d
--- /dev/null
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .tool_choice import ToolChoice
+
+AgentCallResponseToolChoice = typing.Union[
+    typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
new file mode 100644
index 00000000..c7fa9e1c
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -0,0 +1,51 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+    """
+    Response model for calling an Agent in streaming mode.
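+
+    Each streamed event carries an `EventType` and, depending on the event, a
+    payload that is a `LogStreamResponse` chunk, a completed `LogResponse`, or
+    a `ToolCall` (see `AgentCallStreamResponsePayload`).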
+ """ + + log_id: str + message: str + payload: typing.Optional[AgentCallStreamResponsePayload] = None + type: EventType + created_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(AgentCallStreamResponse) diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py new file mode 100644 index 00000000..38120e12 --- /dev/null +++ b/src/humanloop/types/agent_call_stream_response_payload.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .log_response import LogResponse +from .log_stream_response import LogStreamResponse +from .tool_call import ToolCall + +AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall] diff --git a/src/humanloop/types/agent_config_response.py b/src/humanloop/types/agent_config_response.py new file mode 100644 index 00000000..ba346181 --- /dev/null +++ b/src/humanloop/types/agent_config_response.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class AgentConfigResponse(UncheckedBaseModel): + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_continue_call_response.py b/src/humanloop/types/agent_continue_call_response.py new file mode 100644 index 00000000..be988d07 --- /dev/null +++ b/src/humanloop/types/agent_continue_call_response.py @@ -0,0 +1,231 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
+from .chat_message import ChatMessage
+from .log_status import LogStatus
+
+
+class AgentContinueCallResponse(UncheckedBaseModel):
+    """
+    Response model for continuing an Agent call.
+    """
+
+    output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Cost in dollars associated with the tokens in the prompt.
+    """
+
+    output_cost: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Cost in dollars associated with the tokens in the output.
+    """
+
+    finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Reason the generation finished.
+    """
+
+    messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing.Optional[AgentContinueCallResponseToolChoice] = pydantic.Field(default=None)
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    agent: "AgentResponse" = pydantic.Field()
+    """
+    Agent that generated the Log.
+    """
+
+    start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event started.
+    """
+
+    end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event ended.
+    """
+
+    output: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw request sent to provider.
+ """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(AgentContinueCallResponse) diff --git a/src/humanloop/types/agent_continue_call_response_tool_choice.py b/src/humanloop/types/agent_continue_call_response_tool_choice.py new file mode 100644 index 00000000..731cf6b2 --- /dev/null +++ b/src/humanloop/types/agent_continue_call_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoice + +AgentContinueCallResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_continue_call_stream_response.py b/src/humanloop/types/agent_continue_call_stream_response.py new file mode 100644 index 00000000..39f7642d --- /dev/null +++ b/src/humanloop/types/agent_continue_call_stream_response.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload +from .event_type import EventType + + +class AgentContinueCallStreamResponse(UncheckedBaseModel): + """ + Response model for continuing an Agent call in streaming mode. 
+ """ + + log_id: str + message: str + payload: typing.Optional[AgentContinueCallStreamResponsePayload] = None + type: EventType + created_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(AgentContinueCallStreamResponse) diff --git a/src/humanloop/types/agent_continue_call_stream_response_payload.py b/src/humanloop/types/agent_continue_call_stream_response_payload.py new file mode 100644 index 00000000..1d51d8d2 --- /dev/null +++ b/src/humanloop/types/agent_continue_call_stream_response_payload.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .log_response import LogResponse +from .log_stream_response import LogStreamResponse +from .tool_call import ToolCall + +AgentContinueCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall] diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py new file mode 100644 index 00000000..466a0b2d --- /dev/null +++ b/src/humanloop/types/agent_inline_tool.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .on_agent_call_enum import OnAgentCallEnum +from .tool_function import ToolFunction + + +class AgentInlineTool(UncheckedBaseModel): + type: typing.Literal["inline"] = "inline" + json_schema: ToolFunction + on_agent_call: typing.Optional[OnAgentCallEnum] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py new file mode 100644 index 00000000..9cd36a6a --- /dev/null +++ b/src/humanloop/types/agent_kernel_request.py @@ -0,0 +1,123 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort +from .agent_kernel_request_stop import AgentKernelRequestStop +from .agent_kernel_request_template import AgentKernelRequestTemplate +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem +from .model_endpoints import ModelEndpoints +from .model_providers import ModelProviders +from .response_format import ResponseFormat +from .template_language import TemplateLanguage + + +class AgentKernelRequest(UncheckedBaseModel): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) + """ + The template language to use for rendering the template. + """ + + provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) + """ + The company providing the underlying model service. + """ + + max_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + """ + + top_p: typing.Optional[float] = pydantic.Field(default=None) + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None) + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Other parameter values to be passed to the provider call.
+ """ + + seed: typing.Optional[int] = pydantic.Field(default=None) + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None) + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py new file mode 100644 index 00000000..3a0d2d24 --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py new file mode 100644 index 00000000..e38c12e2 --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentKernelRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py new file mode 100644 index 00000000..62f4d40f --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessage + +AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py new file mode 100644 index 00000000..043bb29b --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_tools_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .agent_inline_tool import AgentInlineTool +from .agent_linked_file_request import AgentLinkedFileRequest + +AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py new file mode 100644 index 00000000..5d110bad --- /dev/null +++ b/src/humanloop/types/agent_linked_file_request.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .linked_file_request import LinkedFileRequest +from .on_agent_call_enum import OnAgentCallEnum + + +class AgentLinkedFileRequest(UncheckedBaseModel): + type: typing.Literal["file"] = "file" + link: LinkedFileRequest + on_agent_call: typing.Optional[OnAgentCallEnum] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py new file mode 100644 index 00000000..9788f37d --- /dev/null +++ b/src/humanloop/types/agent_linked_file_response.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .linked_file_request import LinkedFileRequest +from .on_agent_call_enum import OnAgentCallEnum + + +class AgentLinkedFileResponse(UncheckedBaseModel): + type: typing.Literal["file"] = "file" + link: LinkedFileRequest + on_agent_call: typing.Optional[OnAgentCallEnum] = None + file: typing.Optional["AgentLinkedFileResponseFile"] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402, F401, I001 + +update_forward_refs(AgentLinkedFileResponse) diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py new file mode 100644 index 00000000..ab1b384e --- /dev/null +++ b/src/humanloop/types/agent_linked_file_response_file.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. 
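Both members of the `AgentKernelRequestToolsItem` union above carry a literal `type` discriminator (`"file"` vs `"inline"`), so callers can branch on it without isinstance checks. A small sketch using only the fields defined in this diff:

```python
def describe_agent_tool(tool) -> str:
    # `tool` is an AgentKernelRequestToolsItem: an AgentLinkedFileRequest
    # when type == "file", otherwise an AgentInlineTool ("inline").
    if tool.type == "file":
        return f"linked file tool: {tool.link}"
    return f"inline tool schema: {tool.json_schema}"
```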
+ +from __future__ import annotations + +import typing + +from .dataset_response import DatasetResponse + +if typing.TYPE_CHECKING: + from .agent_response import AgentResponse + from .evaluator_response import EvaluatorResponse + from .flow_response import FlowResponse + from .prompt_response import PromptResponse + from .tool_response import ToolResponse +AgentLinkedFileResponseFile = typing.Union[ + "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" +] diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py new file mode 100644 index 00000000..634ad4d0 --- /dev/null +++ b/src/humanloop/types/agent_log_response.py @@ -0,0 +1,225 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_log_response_tool_choice import AgentLogResponseToolChoice +from .chat_message import ChatMessage +from .log_status import LogStatus + + +class AgentLogResponse(UncheckedBaseModel): + """ + General request for creating a Log + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + The messages passed to the provider chat endpoint. + """ + + tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None) + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + """ + + agent: "AgentResponse" = pydantic.Field() + """ + Agent that generated the Log. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs.
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to the provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received from the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations. + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to.
+ """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(AgentLogResponse) diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py new file mode 100644 index 00000000..bf642cf5 --- /dev/null +++ b/src/humanloop/types/agent_log_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoice + +AgentLogResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py new file mode 100644 index 00000000..fb577067 --- /dev/null +++ b/src/humanloop/types/agent_log_stream_response.py @@ -0,0 +1,99 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage + + +class AgentLogStreamResponse(UncheckedBaseModel): + """ + Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. 
+ """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + agent_id: str = pydantic.Field() + """ + ID of the Agent the log belongs to. + """ + + version_id: str = pydantic.Field() + """ + ID of the specific version of the Agent. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py new file mode 100644 index 00000000..cdc54812 --- /dev/null +++ b/src/humanloop/types/agent_response.py @@ -0,0 +1,266 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_response_reasoning_effort import AgentResponseReasoningEffort +from .agent_response_stop import AgentResponseStop +from .agent_response_template import AgentResponseTemplate +from .environment_response import EnvironmentResponse +from .evaluator_aggregate import EvaluatorAggregate +from .input_response import InputResponse +from .model_endpoints import ModelEndpoints +from .model_providers import ModelProviders +from .response_format import ResponseFormat +from .template_language import TemplateLanguage +from .user_response import UserResponse +from .version_status import VersionStatus + + +class AgentResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str = pydantic.Field() + """ + Path of the Agent, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Agent. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) + """ + The template language to use for rendering the template. + """ + + provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) + """ + The company providing the underlying model service. + """ + + max_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + """ + + top_p: typing.Optional[float] = pydantic.Field(default=None) + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None) + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing.Optional[int] = pydantic.Field(default=None) + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None) + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing.List["AgentResponseToolsItem"] = pydantic.Field() + """ + List of tools that the Agent can call. These can be linked files or inline tools.
+ """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Agent version. Version names must be unique for a given Agent. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Agent. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + name: str = pydantic.Field() + """ + Name of the Agent. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the Prompt. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. + """ + + type: typing.Optional[typing.Literal["agent"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Agent Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Agent. + """ + + committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who committed the Agent Version. + """ + + committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + The date and time the Agent Version was committed. + """ + + status: VersionStatus = pydantic.Field() + """ + The status of the Agent Version. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Agent Version + """ + + total_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated across all Agent Versions + """ + + inputs: typing.List[InputResponse] = pydantic.Field() + """ + Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + Evaluators that have been attached to this Agent that are used for monitoring logs. + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Agent Version. + """ + + raw_file_content: typing.Optional[str] = pydantic.Field(default=None) + """ + The raw content of the Agent. Corresponds to the .agent file. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402, F401, I001 + +update_forward_refs(AgentResponse) diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py new file mode 100644 index 00000000..b6fa28cd --- /dev/null +++ b/src/humanloop/types/agent_response_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py new file mode 100644 index 00000000..5c3b6a48 --- /dev/null +++ b/src/humanloop/types/agent_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py new file mode 100644 index 00000000..f5064815 --- /dev/null +++ b/src/humanloop/types/agent_response_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessage + +AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py new file mode 100644 index 00000000..da6970e2 --- /dev/null +++ b/src/humanloop/types/agent_response_tools_item.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from .agent_inline_tool import AgentInlineTool + +if typing.TYPE_CHECKING: + from .agent_linked_file_response import AgentLinkedFileResponse +AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool] diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py new file mode 100644 index 00000000..3e8e782e --- /dev/null +++ b/src/humanloop/types/anthropic_redacted_thinking_content.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class AnthropicRedactedThinkingContent(UncheckedBaseModel): + type: typing.Literal["redacted_thinking"] = "redacted_thinking" + data: str = pydantic.Field() + """ + Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py new file mode 100644 index 00000000..f61501bd --- /dev/null +++ b/src/humanloop/types/anthropic_thinking_content.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class AnthropicThinkingContent(UncheckedBaseModel): + type: typing.Literal["thinking"] = "thinking" + thinking: str = pydantic.Field() + """ + Model's chain-of-thought for providing the response. + """ + + signature: str = pydantic.Field() + """ + Cryptographic signature that verifies the thinking block was generated by Anthropic. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/base_models_user_response.py b/src/humanloop/types/base_models_user_response.py new file mode 100644 index 00000000..8cd96829 --- /dev/null +++ b/src/humanloop/types/base_models_user_response.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BaseModelsUserResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/boolean_evaluator_stats_response.py b/src/humanloop/types/boolean_evaluator_stats_response.py new file mode 100644 index 00000000..9452d923 --- /dev/null +++ b/src/humanloop/types/boolean_evaluator_stats_response.py @@ -0,0 +1,58 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class BooleanEvaluatorStatsResponse(UncheckedBaseModel): + """ + Base attributes for stats for an Evaluator Version-Evaluated Version pair + in the Evaluation. + """ + + evaluator_version_id: str = pydantic.Field() + """ + Unique identifier for the Evaluator Version. + """ + + total_logs: int = pydantic.Field() + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int = pydantic.Field() + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int = pydantic.Field() + """ + The total number of null judgments (i.e. abstentions) for this Evaluator Version. + """ + + num_errors: int = pydantic.Field() + """ + The total number of errored Evaluators for this Evaluator Version.
+ """ + + num_true: int = pydantic.Field() + """ + The total number of `True` judgments for this Evaluator Version. + """ + + num_false: int = pydantic.Field() + """ + The total number of `False` judgments for this Evaluator Version. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py new file mode 100644 index 00000000..348752b5 --- /dev/null +++ b/src/humanloop/types/chat_message.py @@ -0,0 +1,52 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message_content import ChatMessageContent +from .chat_message_thinking_item import ChatMessageThinkingItem +from .chat_role import ChatRole +from .tool_call import ToolCall + + +class ChatMessage(UncheckedBaseModel): + content: typing.Optional[ChatMessageContent] = pydantic.Field(default=None) + """ + The content of the message. + """ + + name: typing.Optional[str] = pydantic.Field(default=None) + """ + Optional name of the message author. + """ + + tool_call_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Tool call that this message is responding to. + """ + + role: ChatRole = pydantic.Field() + """ + Role of the message author. + """ + + tool_calls: typing.Optional[typing.List[ToolCall]] = pydantic.Field(default=None) + """ + A list of tool calls requested by the assistant. + """ + + thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None) + """ + Model's chain-of-thought for providing the response. Present on assistant messages if model supports it. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/chat_message_content.py b/src/humanloop/types/chat_message_content.py new file mode 100644 index 00000000..fd31fa21 --- /dev/null +++ b/src/humanloop/types/chat_message_content.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message_content_item import ChatMessageContentItem + +ChatMessageContent = typing.Union[str, typing.List[ChatMessageContentItem]] diff --git a/src/humanloop/types/chat_message_content_item.py b/src/humanloop/types/chat_message_content_item.py new file mode 100644 index 00000000..1d27b28d --- /dev/null +++ b/src/humanloop/types/chat_message_content_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .image_chat_content import ImageChatContent +from .text_chat_content import TextChatContent + +ChatMessageContentItem = typing.Union[TextChatContent, ImageChatContent] diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py new file mode 100644 index 00000000..2885c825 --- /dev/null +++ b/src/humanloop/types/chat_message_thinking_item.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent +from .anthropic_thinking_content import AnthropicThinkingContent + +ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent] diff --git a/src/humanloop/types/chat_role.py b/src/humanloop/types/chat_role.py new file mode 100644 index 00000000..b5f6b1da --- /dev/null +++ b/src/humanloop/types/chat_role.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ChatRole = typing.Union[typing.Literal["user", "assistant", "system", "tool", "developer"], typing.Any] diff --git a/src/humanloop/types/chat_tool_type.py b/src/humanloop/types/chat_tool_type.py new file mode 100644 index 00000000..8e488088 --- /dev/null +++ b/src/humanloop/types/chat_tool_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ChatToolType = typing.Literal["function"] diff --git a/src/humanloop/types/code_evaluator_request.py b/src/humanloop/types/code_evaluator_request.py new file mode 100644 index 00000000..e8c574f9 --- /dev/null +++ b/src/humanloop/types/code_evaluator_request.py @@ -0,0 +1,59 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluator_arguments_type import EvaluatorArgumentsType +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse +from .evaluator_return_type_enum import EvaluatorReturnTypeEnum +from .valence import Valence + + +class CodeEvaluatorRequest(UncheckedBaseModel): + arguments_type: EvaluatorArgumentsType = pydantic.Field() + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: EvaluatorReturnTypeEnum = pydantic.Field() + """ + The type of the return value of the Evaluator. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) + """ + The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. + """ + + number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing.Optional[Valence] = pydantic.Field(default=None) + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. + """ + + evaluator_type: typing.Literal["python"] = "python" + code: str = pydantic.Field() + """ + The code for the Evaluator. This code will be executed in a sandboxed environment. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/config_tool_response.py b/src/humanloop/types/config_tool_response.py new file mode 100644 index 00000000..7ed7682f --- /dev/null +++ b/src/humanloop/types/config_tool_response.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ConfigToolResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py new file mode 100644 index 00000000..2fe74aa4 --- /dev/null +++ b/src/humanloop/types/create_agent_log_response.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .log_status import LogStatus + + +class CreateAgentLogResponse(UncheckedBaseModel): + """ + Response for an Agent Log. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + agent_id: str = pydantic.Field() + """ + Unique identifier for the Agent. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the Agent Version. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/create_datapoint_request.py b/src/humanloop/types/create_datapoint_request.py new file mode 100644 index 00000000..31f3e4f7 --- /dev/null +++ b/src/humanloop/types/create_datapoint_request.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage +from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue + + +class CreateDatapointRequest(UncheckedBaseModel): + inputs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + The inputs to the prompt template. + """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + List of chat messages to provide to the model. + """ + + target: typing.Optional[typing.Dict[str, CreateDatapointRequestTargetValue]] = pydantic.Field(default=None) + """ + Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/create_datapoint_request_target_value.py b/src/humanloop/types/create_datapoint_request_target_value.py new file mode 100644 index 00000000..92a371fa --- /dev/null +++ b/src/humanloop/types/create_datapoint_request_target_value.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateDatapointRequestTargetValue = typing.Union[ + str, int, float, bool, typing.List[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]] +] diff --git a/src/humanloop/types/create_evaluator_log_response.py b/src/humanloop/types/create_evaluator_log_response.py new file mode 100644 index 00000000..9f917d3d --- /dev/null +++ b/src/humanloop/types/create_evaluator_log_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class CreateEvaluatorLogResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + String identifier of the new Log. + """ + + parent_id: str = pydantic.Field() + """ + Identifier of the evaluated parent Log. + """ + + session_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier of the Session containing both the parent and the new child Log. If the parent Log does not belong to a Session, a new Session is created with this ID. + """ + + version_id: str = pydantic.Field() + """ + Identifier of Evaluator Version for which the Log was registered. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/create_flow_log_response.py b/src/humanloop/types/create_flow_log_response.py new file mode 100644 index 00000000..ae296a6f --- /dev/null +++ b/src/humanloop/types/create_flow_log_response.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .log_status import LogStatus + + +class CreateFlowLogResponse(UncheckedBaseModel): + """ + Response for a Flow Log. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + flow_id: str = pydantic.Field() + """ + Unique identifier for the Flow. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the Flow Version. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/create_prompt_log_response.py b/src/humanloop/types/create_prompt_log_response.py new file mode 100644 index 00000000..cd80d43b --- /dev/null +++ b/src/humanloop/types/create_prompt_log_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class CreatePromptLogResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + String ID of log. + """ + + prompt_id: str = pydantic.Field() + """ + ID of the Prompt the log belongs to. + """ + + version_id: str = pydantic.Field() + """ + ID of the specific version of the Prompt. + """ + + session_id: typing.Optional[str] = pydantic.Field(default=None) + """ + String ID of session the log belongs to. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/create_tool_log_response.py b/src/humanloop/types/create_tool_log_response.py new file mode 100644 index 00000000..6ba171fa --- /dev/null +++ b/src/humanloop/types/create_tool_log_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class CreateToolLogResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + String ID of log. + """ + + tool_id: str = pydantic.Field() + """ + ID of the Tool the log belongs to. + """ + + version_id: str = pydantic.Field() + """ + ID of the specific version of the Tool. + """ + + session_id: typing.Optional[str] = pydantic.Field(default=None) + """ + String ID of session the log belongs to. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/dashboard_configuration.py b/src/humanloop/types/dashboard_configuration.py new file mode 100644 index 00000000..f5d752d8 --- /dev/null +++ b/src/humanloop/types/dashboard_configuration.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .time_unit import TimeUnit + + +class DashboardConfiguration(UncheckedBaseModel): + time_unit: TimeUnit + time_range_days: int + model_config_ids: typing.List[str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/datapoint_response.py b/src/humanloop/types/datapoint_response.py new file mode 100644 index 00000000..2eb4de68 --- /dev/null +++ b/src/humanloop/types/datapoint_response.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage +from .datapoint_response_target_value import DatapointResponseTargetValue + + +class DatapointResponse(UncheckedBaseModel): + inputs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + The inputs to the prompt template. + """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + List of chat messages to provide to the model. + """ + + target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]] = pydantic.Field(default=None) + """ + Object with criteria necessary to evaluate generations with this Datapoint. This is passed in as an argument to Evaluators when used in an Evaluation. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Datapoint. Starts with `dp_`. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/datapoint_response_target_value.py b/src/humanloop/types/datapoint_response_target_value.py new file mode 100644 index 00000000..c7f0f16e --- /dev/null +++ b/src/humanloop/types/datapoint_response_target_value.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DatapointResponseTargetValue = typing.Union[ + str, int, float, bool, typing.List[typing.Optional[typing.Any]], typing.Dict[str, typing.Optional[typing.Any]] +] diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py new file mode 100644 index 00000000..9153374a --- /dev/null +++ b/src/humanloop/types/dataset_response.py @@ -0,0 +1,117 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel +from .datapoint_response import DatapointResponse +from .environment_response import EnvironmentResponse +from .user_response import UserResponse + + +class DatasetResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. 
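+
+    A minimal usage sketch (assumes the generated `Humanloop` client; the API key and Dataset ID are placeholders):
+        client = Humanloop(api_key="...")
+        dataset = client.datasets.get(id="ds_...")
+        print(dataset.path, dataset.datapoints_count)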
+ """ + + path: str = pydantic.Field() + """ + Path of the Dataset, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Dataset. Starts with `ds_`. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + name: str = pydantic.Field() + """ + Name of the Dataset, which is used as a unique identifier. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Dataset. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Dataset Version. If no query params provided, the default deployed Dataset Version is returned. Starts with `dsv_`. + """ + + type: typing.Optional[typing.Literal["dataset"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Dataset Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Dataset. + """ + + last_used_at: dt.datetime + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Dataset version. Version names must be unique for a given Dataset. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. + """ + + datapoints_count: int = pydantic.Field() + """ + The number of Datapoints in this Dataset version. + """ + + datapoints: typing.Optional[typing.List[DatapointResponse]] = pydantic.Field(default=None) + """ + The list of Datapoints in this Dataset version. Only provided if explicitly requested. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/datasets_request.py b/src/humanloop/types/datasets_request.py new file mode 100644 index 00000000..84e126aa --- /dev/null +++ b/src/humanloop/types/datasets_request.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DatasetsRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/directory_response.py b/src/humanloop/types/directory_response.py new file mode 100644 index 00000000..a56f0732 --- /dev/null +++ b/src/humanloop/types/directory_response.py @@ -0,0 +1,57 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class DirectoryResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + String ID of directory. Starts with `dir_`. + """ + + parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`. + """ + + name: str = pydantic.Field() + """ + Name of the directory. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the directory. + """ + + path: str = pydantic.Field() + """ + Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the directory. + """ + + tags: typing.List[str] = pydantic.Field() + """ + List of tags associated with the directory. + """ + + created_at: dt.datetime + updated_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py new file mode 100644 index 00000000..a04de500 --- /dev/null +++ b/src/humanloop/types/directory_with_parents_and_children_response.py @@ -0,0 +1,88 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .directory_response import DirectoryResponse +from .directory_with_parents_and_children_response_files_item import DirectoryWithParentsAndChildrenResponseFilesItem + + +class DirectoryWithParentsAndChildrenResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + String ID of directory. Starts with `dir_`. + """ + + parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the parent directory. Will be `None` if the directory is the root directory. Starts with `dir_`. + """ + + name: str = pydantic.Field() + """ + Name of the directory. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the directory. + """ + + path: str = pydantic.Field() + """ + Path to the directory, relative to the root directory. Includes name, e.g. `path/to/directory`. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the directory. + """ + + tags: typing.List[str] = pydantic.Field() + """ + List of tags associated with the directory. + """ + + created_at: dt.datetime + updated_at: dt.datetime + subdirectories: typing.List[DirectoryResponse] = pydantic.Field() + """ + List of subdirectories in the directory. + """ + + files: typing.List[DirectoryWithParentsAndChildrenResponseFilesItem] = pydantic.Field() + """ + List of files in the directory. + """ + + parents: typing.List[DirectoryResponse] = pydantic.Field() + """ + List of parent directories of the directory. 
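+
+    For example, a sketch that rebuilds the full path (assuming `parents` is ordered from the root directory down):
+        full_path = "/".join([d.name for d in directory.parents] + [directory.name])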
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(DirectoryWithParentsAndChildrenResponse) diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py new file mode 100644 index 00000000..2c418d75 --- /dev/null +++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_response import AgentResponse +from .dataset_response import DatasetResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse + +DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[ + PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse +] diff --git a/src/humanloop/types/environment_response.py b/src/humanloop/types/environment_response.py new file mode 100644 index 00000000..23c0ab8f --- /dev/null +++ b/src/humanloop/types/environment_response.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_tag import EnvironmentTag + + +class EnvironmentResponse(UncheckedBaseModel): + id: str + created_at: dt.datetime + name: str + tag: EnvironmentTag + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/environment_tag.py b/src/humanloop/types/environment_tag.py new file mode 100644 index 00000000..f09bde15 --- /dev/null +++ b/src/humanloop/types/environment_tag.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EnvironmentTag = typing.Union[typing.Literal["default", "other"], typing.Any] diff --git a/src/humanloop/types/evaluatee_request.py b/src/humanloop/types/evaluatee_request.py new file mode 100644 index 00000000..a51c07aa --- /dev/null +++ b/src/humanloop/types/evaluatee_request.py @@ -0,0 +1,58 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluateeRequest(UncheckedBaseModel): + """ + Specification of a File version on Humanloop. + + This can be done in a couple of ways: + - Specifying `version_id` directly. + - Specifying a File (and optionally an Environment). + - A File can be specified by either `path` or `file_id`. + - An Environment can be specified by `environment_id`. If no Environment is specified, the default Environment is used. + """ + + version_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the File Version. If provided, none of the other fields should be specified. + """ + + path: typing.Optional[str] = pydantic.Field(default=None) + """ + Path identifying a File. Provide either this or `file_id` if you want to specify a File. + """ + + file_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the File. Provide either this or `path` if you want to specify a File. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used. + """ + + batch_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + orchestrated: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py new file mode 100644 index 00000000..0a2169e0 --- /dev/null +++ b/src/humanloop/types/evaluatee_response.py @@ -0,0 +1,60 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .run_version_response import RunVersionResponse + + +class EvaluateeResponse(UncheckedBaseModel): + """ + Version of the Evaluatee being evaluated. + """ + + version: typing.Optional[RunVersionResponse] = None + batch_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + orchestrated: bool = pydantic.Field() + """ + Whether the Prompt/Tool is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + """ + + pinned: bool = pydantic.Field() + """ + Pinned Evaluatees are shown in Humanloop's Overview, allowing you to use them as baselines for comparison. + """ + + added_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the Evaluatee was added to the Evaluation. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluateeResponse) diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py new file mode 100644 index 00000000..c63ebb8d --- /dev/null +++ b/src/humanloop/types/evaluation_evaluator_response.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluationEvaluatorResponse(UncheckedBaseModel): + version: "EvaluatorResponse" + orchestrated: bool = pydantic.Field() + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. + """ + + added_at: dt.datetime = pydantic.Field() + """ + When the Evaluator was added to the Evaluation. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluationEvaluatorResponse) diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py new file mode 100644 index 00000000..bd2864f2 --- /dev/null +++ b/src/humanloop/types/evaluation_log_response.py @@ -0,0 +1,60 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .datapoint_response import DatapointResponse + + +class EvaluationLogResponse(UncheckedBaseModel): + run_id: str = pydantic.Field() + """ + Unique identifier for the Run. + """ + + datapoint: typing.Optional[DatapointResponse] = pydantic.Field(default=None) + """ + The Datapoint used to generate the Log + """ + + log: "LogResponse" = pydantic.Field() + """ + The Log that was evaluated by the Evaluator. + """ + + evaluator_logs: typing.List["LogResponse"] = pydantic.Field() + """ + The Evaluator Logs containing the judgments for the Log. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluationLogResponse) diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py new file mode 100644 index 00000000..dc32e6dc --- /dev/null +++ b/src/humanloop/types/evaluation_response.py @@ -0,0 +1,69 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_evaluator_response import EvaluationEvaluatorResponse +from .user_response import UserResponse + + +class EvaluationResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + Unique identifier for the Evaluation. Starts with `evr`. + """ + + runs_count: int = pydantic.Field() + """ + The total number of Runs in the Evaluation. + """ + + evaluators: typing.List[EvaluationEvaluatorResponse] = pydantic.Field() + """ + The Evaluator Versions used to evaluate. + """ + + name: typing.Optional[str] = pydantic.Field(default=None) + """ + Name of the Evaluation to help identify it. Must be unique among Evaluations associated with File. + """ + + file_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the File associated with the Evaluation. 
+ """ + + created_at: dt.datetime + created_by: typing.Optional[UserResponse] = None + updated_at: dt.datetime + url: typing.Optional[str] = pydantic.Field(default=None) + """ + URL to view the Evaluation on the Humanloop. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluationResponse) diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py new file mode 100644 index 00000000..d2977f63 --- /dev/null +++ b/src/humanloop/types/evaluation_run_response.py @@ -0,0 +1,83 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .dataset_response import DatasetResponse +from .evaluation_status import EvaluationStatus +from .run_version_response import RunVersionResponse +from .user_response import UserResponse + + +class EvaluationRunResponse(UncheckedBaseModel): + id: str = pydantic.Field() + """ + Unique identifier for the Run. + """ + + dataset: typing.Optional[DatasetResponse] = pydantic.Field(default=None) + """ + The Dataset used in the Run. + """ + + version: typing.Optional[RunVersionResponse] = pydantic.Field(default=None) + """ + The version used in the Run. + """ + + orchestrated: bool = pydantic.Field() + """ + Whether the Run is orchestrated by Humanloop. + """ + + added_at: dt.datetime = pydantic.Field() + """ + When the Run was added to the Evaluation. + """ + + created_at: dt.datetime = pydantic.Field() + """ + When the Run was created. + """ + + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The User who created the Run. + """ + + status: EvaluationStatus = pydantic.Field() + """ + The status of the Run. + """ + + control: bool = pydantic.Field() + """ + Stats for other Runs will be displayed in comparison to the control Run. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluationRunResponse) diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py new file mode 100644 index 00000000..e815d1e7 --- /dev/null +++ b/src/humanloop/types/evaluation_runs_response.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_run_response import EvaluationRunResponse + + +class EvaluationRunsResponse(UncheckedBaseModel): + runs: typing.List[EvaluationRunResponse] = pydantic.Field() + """ + The Runs in the Evaluation. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluationRunsResponse) diff --git a/src/humanloop/types/evaluation_stats.py b/src/humanloop/types/evaluation_stats.py new file mode 100644 index 00000000..656d45d0 --- /dev/null +++ b/src/humanloop/types/evaluation_stats.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_status import EvaluationStatus +from .run_stats_response import RunStatsResponse + + +class EvaluationStats(UncheckedBaseModel): + run_stats: typing.List[RunStatsResponse] = pydantic.Field() + """ + Stats for each Run in the Evaluation. 
+ """ + + progress: typing.Optional[str] = pydantic.Field(default=None) + """ + A summary string report of the Evaluation's progress you can print to the command line;helpful when integrating Evaluations with CI/CD. + """ + + report: typing.Optional[str] = pydantic.Field(default=None) + """ + A summary string report of the Evaluation you can print to command line;helpful when integrating Evaluations with CI/CD. + """ + + status: EvaluationStatus = pydantic.Field() + """ + The current status of the Evaluation. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluation_status.py b/src/humanloop/types/evaluation_status.py new file mode 100644 index 00000000..9e45efbe --- /dev/null +++ b/src/humanloop/types/evaluation_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluationStatus = typing.Union[typing.Literal["pending", "running", "completed", "cancelled"], typing.Any] diff --git a/src/humanloop/types/evaluations_dataset_request.py b/src/humanloop/types/evaluations_dataset_request.py new file mode 100644 index 00000000..a59bb83b --- /dev/null +++ b/src/humanloop/types/evaluations_dataset_request.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluationsDatasetRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/evaluations_request.py b/src/humanloop/types/evaluations_request.py new file mode 100644 index 00000000..640f9b5f --- /dev/null +++ b/src/humanloop/types/evaluations_request.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluationsRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/evaluator_activation_deactivation_request.py b/src/humanloop/types/evaluator_activation_deactivation_request.py new file mode 100644 index 00000000..f9c6023e --- /dev/null +++ b/src/humanloop/types/evaluator_activation_deactivation_request.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluator_activation_deactivation_request_activate_item import EvaluatorActivationDeactivationRequestActivateItem +from .evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItem, +) + + +class EvaluatorActivationDeactivationRequest(UncheckedBaseModel): + activate: typing.Optional[typing.List[EvaluatorActivationDeactivationRequestActivateItem]] = pydantic.Field( + default=None + ) + """ + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + """ + + deactivate: typing.Optional[typing.List[EvaluatorActivationDeactivationRequestDeactivateItem]] = pydantic.Field( + default=None + ) + """ + Evaluators to deactivate. These will not be run on new Logs. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py b/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py new file mode 100644 index 00000000..6d2039b9 --- /dev/null +++ b/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest +from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest + +EvaluatorActivationDeactivationRequestActivateItem = typing.Union[ + MonitoringEvaluatorVersionRequest, MonitoringEvaluatorEnvironmentRequest +] diff --git a/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py b/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py new file mode 100644 index 00000000..6eb65d03 --- /dev/null +++ b/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest +from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest + +EvaluatorActivationDeactivationRequestDeactivateItem = typing.Union[ + MonitoringEvaluatorVersionRequest, MonitoringEvaluatorEnvironmentRequest +] diff --git a/src/humanloop/types/evaluator_aggregate.py b/src/humanloop/types/evaluator_aggregate.py new file mode 100644 index 00000000..5c24915a --- /dev/null +++ b/src/humanloop/types/evaluator_aggregate.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluatorAggregate(UncheckedBaseModel): + value: float = pydantic.Field() + """ + The aggregated value of the evaluator. + """ + + evaluator_id: str = pydantic.Field() + """ + ID of the evaluator. + """ + + evaluator_version_id: str = pydantic.Field() + """ + ID of the evaluator version. + """ + + created_at: dt.datetime + updated_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_arguments_type.py b/src/humanloop/types/evaluator_arguments_type.py new file mode 100644 index 00000000..56067b4a --- /dev/null +++ b/src/humanloop/types/evaluator_arguments_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EvaluatorArgumentsType = typing.Union[typing.Literal["target_free", "target_required"], typing.Any] diff --git a/src/humanloop/types/evaluator_config_response.py b/src/humanloop/types/evaluator_config_response.py new file mode 100644 index 00000000..00bd5cd8 --- /dev/null +++ b/src/humanloop/types/evaluator_config_response.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluatorConfigResponse(UncheckedBaseModel): + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_file_id.py b/src/humanloop/types/evaluator_file_id.py new file mode 100644 index 00000000..6c3b3141 --- /dev/null +++ b/src/humanloop/types/evaluator_file_id.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluatorFileId(UncheckedBaseModel): + """ + Base model for specifying an Evaluator for an Evaluation. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the File. + """ + + orchestrated: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_file_path.py b/src/humanloop/types/evaluator_file_path.py new file mode 100644 index 00000000..cd967935 --- /dev/null +++ b/src/humanloop/types/evaluator_file_path.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluatorFilePath(UncheckedBaseModel): + """ + Base model for specifying an Evaluator for an Evaluation. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + path: str = pydantic.Field() + """ + Path identifying a File. Provide this to specify a File. + """ + + orchestrated: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. 
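+
+    Example (a sketch; the path and Environment name are placeholders):
+        EvaluatorFilePath(path="Evaluators/Tone", environment="production")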
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_judgment_number_limit.py b/src/humanloop/types/evaluator_judgment_number_limit.py new file mode 100644 index 00000000..289afd4c --- /dev/null +++ b/src/humanloop/types/evaluator_judgment_number_limit.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluatorJudgmentNumberLimit(UncheckedBaseModel): + min: typing.Optional[float] = pydantic.Field(default=None) + """ + The minimum value that can be selected. + """ + + max: typing.Optional[float] = pydantic.Field(default=None) + """ + The maximum value that can be selected. + """ + + step: typing.Optional[float] = pydantic.Field(default=None) + """ + The step size for the number input. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_judgment_option_response.py b/src/humanloop/types/evaluator_judgment_option_response.py new file mode 100644 index 00000000..3b3a78e3 --- /dev/null +++ b/src/humanloop/types/evaluator_judgment_option_response.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .valence import Valence + + +class EvaluatorJudgmentOptionResponse(UncheckedBaseModel): + name: str = pydantic.Field() + """ + The name of the option. + """ + + valence: typing.Optional[Valence] = pydantic.Field(default=None) + """ + Whether this option should be considered positive or negative. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py new file mode 100644 index 00000000..e006e7a2 --- /dev/null +++ b/src/humanloop/types/evaluator_log_response.py @@ -0,0 +1,201 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage +from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment +from .log_status import LogStatus + + +class EvaluatorLogResponse(UncheckedBaseModel): + """ + General request for creating a Log + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. 
+ """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier of the evaluated Log. The newly created Log will have this one set as parent. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the LLM. Only populated for LLM Evaluator Logs. 
+ """ + + judgment: typing.Optional[EvaluatorLogResponseJudgment] = pydantic.Field(default=None) + """ + Evaluator assessment of the Log. + """ + + marked_completed: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the Log has been manually marked as completed by a user. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + evaluator: "EvaluatorResponse" = pydantic.Field() + """ + Evaluator used to generate the judgment. + """ + + parent: typing.Optional["LogResponse"] = pydantic.Field(default=None) + """ + The Log that was evaluated. Only provided if the ?include_parent query parameter is set for the + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluatorLogResponse) diff --git a/src/humanloop/types/evaluator_log_response_judgment.py b/src/humanloop/types/evaluator_log_response_judgment.py new file mode 100644 index 00000000..fd0bbedd --- /dev/null +++ b/src/humanloop/types/evaluator_log_response_judgment.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluatorLogResponseJudgment = typing.Union[bool, str, typing.List[str], float] diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py new file mode 100644 index 00000000..0af149d3 --- /dev/null +++ b/src/humanloop/types/evaluator_response.py @@ -0,0 +1,146 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_response import EnvironmentResponse +from .evaluator_aggregate import EvaluatorAggregate +from .evaluator_response_spec import EvaluatorResponseSpec +from .input_response import InputResponse +from .user_response import UserResponse + + +class EvaluatorResponse(UncheckedBaseModel): + """ + Version of the Evaluator used to provide judgments. + """ + + path: str = pydantic.Field() + """ + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Evaluator. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. + """ + + spec: EvaluatorResponseSpec + name: str = pydantic.Field() + """ + Name of the Evaluator, which is used as a unique identifier. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Evaluator. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Evaluator Version. If no query params provided, the default deployed Evaluator Version is returned. + """ + + type: typing.Optional[typing.Literal["evaluator"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Evaluator Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Evaluator. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Evaluator Version + """ + + total_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated across all Evaluator Versions + """ + + inputs: typing.List[InputResponse] = pydantic.Field() + """ + Inputs associated to the Evaluator. Inputs correspond to any of the variables used within the Evaluator template. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + Evaluators that have been attached to this Evaluator that are used for monitoring logs. + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Evaluator Version. 
+ """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(EvaluatorResponse) diff --git a/src/humanloop/types/evaluator_response_spec.py b/src/humanloop/types/evaluator_response_spec.py new file mode 100644 index 00000000..45eb1790 --- /dev/null +++ b/src/humanloop/types/evaluator_response_spec.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .code_evaluator_request import CodeEvaluatorRequest +from .external_evaluator_request import ExternalEvaluatorRequest +from .human_evaluator_request import HumanEvaluatorRequest +from .llm_evaluator_request import LlmEvaluatorRequest + +EvaluatorResponseSpec = typing.Union[ + LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest +] diff --git a/src/humanloop/types/evaluator_return_type_enum.py b/src/humanloop/types/evaluator_return_type_enum.py new file mode 100644 index 00000000..052a51d2 --- /dev/null +++ b/src/humanloop/types/evaluator_return_type_enum.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluatorReturnTypeEnum = typing.Union[ + typing.Literal["boolean", "number", "select", "multi_select", "text"], typing.Any +] diff --git a/src/humanloop/types/evaluator_version_id.py b/src/humanloop/types/evaluator_version_id.py new file mode 100644 index 00000000..688acf9a --- /dev/null +++ b/src/humanloop/types/evaluator_version_id.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class EvaluatorVersionId(UncheckedBaseModel): + """ + Base model for specifying an Evaluator for an Evaluation. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the Version. + """ + + orchestrated: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/evaluators_request.py b/src/humanloop/types/evaluators_request.py new file mode 100644 index 00000000..6c8cef34 --- /dev/null +++ b/src/humanloop/types/evaluators_request.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EvaluatorsRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py new file mode 100644 index 00000000..128eed92 --- /dev/null +++ b/src/humanloop/types/event_type.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EventType = typing.Union[ + typing.Literal[ + "agent_turn_start", + "agent_turn_suspend", + "agent_turn_continue", + "agent_turn_end", + "agent_start", + "agent_update", + "agent_end", + "tool_start", + "tool_update", + "tool_end", + "error", + "agent_generation_error", + ], + typing.Any, +] diff --git a/src/humanloop/types/external_evaluator_request.py b/src/humanloop/types/external_evaluator_request.py new file mode 100644 index 00000000..9f528f67 --- /dev/null +++ b/src/humanloop/types/external_evaluator_request.py @@ -0,0 +1,55 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluator_arguments_type import EvaluatorArgumentsType +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse +from .evaluator_return_type_enum import EvaluatorReturnTypeEnum +from .valence import Valence + + +class ExternalEvaluatorRequest(UncheckedBaseModel): + arguments_type: EvaluatorArgumentsType = pydantic.Field() + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: EvaluatorReturnTypeEnum = pydantic.Field() + """ + The type of the return value of the Evaluator. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) + """ + The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. + """ + + number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing.Optional[Valence] = pydantic.Field(default=None) + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. 
+ """ + + evaluator_type: typing.Literal["external"] = "external" + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/feedback_type.py b/src/humanloop/types/feedback_type.py new file mode 100644 index 00000000..5a964f16 --- /dev/null +++ b/src/humanloop/types/feedback_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +FeedbackType = typing.Optional[typing.Any] diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py new file mode 100644 index 00000000..f3a26ef0 --- /dev/null +++ b/src/humanloop/types/file_environment_response.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_tag import EnvironmentTag +from .file_environment_response_file import FileEnvironmentResponseFile + + +class FileEnvironmentResponse(UncheckedBaseModel): + """ + Response model for the List Environments endpoint under Files. + + Contains the deployed version of the File, if one is deployed to the Environment. + """ + + id: str + created_at: dt.datetime + name: str + tag: EnvironmentTag + file: typing.Optional[FileEnvironmentResponseFile] = pydantic.Field(default=None) + """ + The version of the File that is deployed to the Environment, if one is deployed. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(FileEnvironmentResponse) diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py new file mode 100644 index 00000000..2725177a --- /dev/null +++ b/src/humanloop/types/file_environment_response_file.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .agent_response import AgentResponse +from .dataset_response import DatasetResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse + +FileEnvironmentResponseFile = typing.Union[ + PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse +] diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py new file mode 100644 index 00000000..112e9602 --- /dev/null +++ b/src/humanloop/types/file_environment_variable_request.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FileEnvironmentVariableRequest(UncheckedBaseModel): + name: str = pydantic.Field() + """ + Name of the environment variable. + """ + + value: str = pydantic.Field() + """ + Value of the environment variable. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_id.py b/src/humanloop/types/file_id.py new file mode 100644 index 00000000..fe049681 --- /dev/null +++ b/src/humanloop/types/file_id.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FileId(UncheckedBaseModel): + """ + Specification of a File by its ID. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the File. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_path.py b/src/humanloop/types/file_path.py new file mode 100644 index 00000000..3f4f7591 --- /dev/null +++ b/src/humanloop/types/file_path.py @@ -0,0 +1,32 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FilePath(UncheckedBaseModel): + """ + Specification of a File by its path. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + If provided, the Version deployed to this Environment is used. If not provided, the Version deployed to the default Environment is used. + """ + + path: str = pydantic.Field() + """ + Path identifying a File. Provide this to specify a File. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_request.py b/src/humanloop/types/file_request.py new file mode 100644 index 00000000..ba9518e2 --- /dev/null +++ b/src/humanloop/types/file_request.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FileRequest(UncheckedBaseModel): + id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID for an existing File. + """ + + path: typing.Optional[str] = pydantic.Field(default=None) + """ + Path of the File, including the name. This locates the File in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_sort_by.py b/src/humanloop/types/file_sort_by.py new file mode 100644 index 00000000..b3135c3b --- /dev/null +++ b/src/humanloop/types/file_sort_by.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +FileSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any] diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py new file mode 100644 index 00000000..f235825b --- /dev/null +++ b/src/humanloop/types/file_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any] diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py new file mode 100644 index 00000000..753d9ba2 --- /dev/null +++ b/src/humanloop/types/files_tool_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +FilesToolType = typing.Union[ + typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any +] diff --git a/src/humanloop/types/flow_kernel_request.py b/src/humanloop/types/flow_kernel_request.py new file mode 100644 index 00000000..9b9adec9 --- /dev/null +++ b/src/humanloop/types/flow_kernel_request.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FlowKernelRequest(UncheckedBaseModel): + attributes: typing.Dict[str, typing.Optional[typing.Any]] = pydantic.Field() + """ + A key-value object identifying the Flow Version. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py new file mode 100644 index 00000000..188c1fdf --- /dev/null +++ b/src/humanloop/types/flow_log_response.py @@ -0,0 +1,185 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage +from .log_status import LogStatus + + +class FlowLogResponse(UncheckedBaseModel): + """ + General request for creating a Log + """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + List of chat messages that were used as an input to the Flow. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The output message returned by this Flow. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the Flow Log. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. 
This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + flow: "FlowResponse" = pydantic.Field() + """ + Flow used to generate the Log. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(FlowLogResponse) diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py new file mode 100644 index 00000000..826b9238 --- /dev/null +++ b/src/humanloop/types/flow_response.py @@ -0,0 +1,133 @@ +# This file was auto-generated by Fern from our API Definition. 
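The bottom-of-module imports and the `update_forward_refs(FlowLogResponse)` call above are the cycle-breaking pattern repeated throughout these generated types: fields reference sibling models as strings, the imports run after the class body, and the forward references are resolved last. A standalone sketch of the same idea, using a plain `pydantic.BaseModel` purely for illustration:

    from __future__ import annotations

    import typing

    import pydantic


    class Node(pydantic.BaseModel):
        name: str
        # The class body names "Node" before the class exists, so the
        # annotation stays a string until it is resolved below.
        children: typing.List["Node"] = []


    # Pydantic v1 spelling, as in the generated code; Pydantic v2 still
    # accepts it as a deprecated alias for model_rebuild().
    Node.update_forward_refs()

    tree = Node(name="root", children=[Node(name="leaf")])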
+ +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_response import EnvironmentResponse +from .evaluator_aggregate import EvaluatorAggregate +from .user_response import UserResponse + + +class FlowResponse(UncheckedBaseModel): + """ + Response model for a Flow. + """ + + path: str = pydantic.Field() + """ + Path of the Flow, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Flow. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + attributes: typing.Dict[str, typing.Optional[typing.Any]] = pydantic.Field() + """ + A key-value object identifying the Flow Version. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Flow version. Version names must be unique for a given Flow. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Version. + """ + + name: str = pydantic.Field() + """ + Name of the Flow. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Flow. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Flow Version. If no query params provided, the default deployed Flow Version is returned. + """ + + type: typing.Optional[typing.Literal["flow"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Flow Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Flow. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Flow Version + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Flow Version. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + The list of Monitoring Evaluators associated with the Flow Version. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(FlowResponse) diff --git a/src/humanloop/types/function_tool.py b/src/humanloop/types/function_tool.py new file mode 100644 index 00000000..faef2899 --- /dev/null +++ b/src/humanloop/types/function_tool.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FunctionTool(UncheckedBaseModel): + """ + A function tool to be called by the model where user owns runtime. + """ + + name: str + arguments: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/function_tool_choice.py b/src/humanloop/types/function_tool_choice.py new file mode 100644 index 00000000..43d0eeb6 --- /dev/null +++ b/src/humanloop/types/function_tool_choice.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class FunctionToolChoice(UncheckedBaseModel): + """ + A function tool to be called by the model where user owns runtime. + """ + + name: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/http_validation_error.py b/src/humanloop/types/http_validation_error.py new file mode 100644 index 00000000..188935a0 --- /dev/null +++ b/src/humanloop/types/http_validation_error.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .validation_error import ValidationError + + +class HttpValidationError(UncheckedBaseModel): + detail: typing.Optional[typing.List[ValidationError]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/human_evaluator_request.py b/src/humanloop/types/human_evaluator_request.py new file mode 100644 index 00000000..ef604880 --- /dev/null +++ b/src/humanloop/types/human_evaluator_request.py @@ -0,0 +1,59 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluator_arguments_type import EvaluatorArgumentsType +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse +from .human_evaluator_request_return_type import HumanEvaluatorRequestReturnType +from .valence import Valence + + +class HumanEvaluatorRequest(UncheckedBaseModel): + arguments_type: EvaluatorArgumentsType = pydantic.Field() + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: HumanEvaluatorRequestReturnType = pydantic.Field() + """ + The type of the return value of the Evaluator. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) + """ + The options that can be applied as judgments. + """ + + number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing.Optional[Valence] = pydantic.Field(default=None) + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. + """ + + evaluator_type: typing.Literal["human"] = "human" + instructions: typing.Optional[str] = pydantic.Field(default=None) + """ + Instructions and guidelines for applying judgments. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/human_evaluator_request_return_type.py b/src/humanloop/types/human_evaluator_request_return_type.py new file mode 100644 index 00000000..c6ae135e --- /dev/null +++ b/src/humanloop/types/human_evaluator_request_return_type.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
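Likewise, a hypothetical sketch of the `HumanEvaluatorRequest` above. `"target_free"` is again an assumed `EvaluatorArgumentsType` value, and `"select"` is one of the `HumanEvaluatorRequestReturnType` literals defined next.

    from humanloop.types.human_evaluator_request import HumanEvaluatorRequest

    spec = HumanEvaluatorRequest(
        arguments_type="target_free",  # assumed literal, as before
        return_type="select",          # from HumanEvaluatorRequestReturnType below
        instructions="Pick the option that best describes the response tone.",
    )
    assert spec.evaluator_type == "human"  # fixed by the Literal default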
+ +import typing + +HumanEvaluatorRequestReturnType = typing.Union[ + typing.Literal["select", "multi_select", "text", "number", "boolean"], typing.Any +] diff --git a/src/humanloop/types/image_chat_content.py b/src/humanloop/types/image_chat_content.py new file mode 100644 index 00000000..9e12716d --- /dev/null +++ b/src/humanloop/types/image_chat_content.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .image_url import ImageUrl + + +class ImageChatContent(UncheckedBaseModel): + type: typing.Literal["image_url"] = "image_url" + image_url: ImageUrl = pydantic.Field() + """ + The message's image content. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/image_url.py b/src/humanloop/types/image_url.py new file mode 100644 index 00000000..ed170dea --- /dev/null +++ b/src/humanloop/types/image_url.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .image_url_detail import ImageUrlDetail + + +class ImageUrl(UncheckedBaseModel): + url: str = pydantic.Field() + """ + Either a URL of the image or the base64 encoded image data. + """ + + detail: typing.Optional[ImageUrlDetail] = pydantic.Field(default=None) + """ + Specify the detail level of the image provided to the model. For more details see: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/image_url_detail.py b/src/humanloop/types/image_url_detail.py new file mode 100644 index 00000000..43c7a47d --- /dev/null +++ b/src/humanloop/types/image_url_detail.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ImageUrlDetail = typing.Union[typing.Literal["high", "low", "auto"], typing.Any] diff --git a/src/humanloop/types/input_response.py b/src/humanloop/types/input_response.py new file mode 100644 index 00000000..36cfa6ed --- /dev/null +++ b/src/humanloop/types/input_response.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class InputResponse(UncheckedBaseModel): + name: str = pydantic.Field() + """ + Type of input. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py new file mode 100644 index 00000000..7ce2bc95 --- /dev/null +++ b/src/humanloop/types/linked_file_request.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class LinkedFileRequest(UncheckedBaseModel): + file_id: str + environment_id: typing.Optional[str] = None + version_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/linked_tool_response.py b/src/humanloop/types/linked_tool_response.py new file mode 100644 index 00000000..95bc2492 --- /dev/null +++ b/src/humanloop/types/linked_tool_response.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class LinkedToolResponse(UncheckedBaseModel): + name: str = pydantic.Field() + """ + Name for the tool referenced by the model. + """ + + description: str = pydantic.Field() + """ + Description of the tool referenced by the model + """ + + strict: typing.Optional[bool] = pydantic.Field(default=None) + """ + If true, forces the model to output json data in the structure of the parameters schema. + """ + + parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Tool linked. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the Tool Version linked. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py new file mode 100644 index 00000000..526d9b9b --- /dev/null +++ b/src/humanloop/types/list_agents.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ListAgents(UncheckedBaseModel): + records: typing.List["AgentResponse"] = pydantic.Field() + """ + The list of Agents. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(ListAgents) diff --git a/src/humanloop/types/list_datasets.py b/src/humanloop/types/list_datasets.py new file mode 100644 index 00000000..3e4de370 --- /dev/null +++ b/src/humanloop/types/list_datasets.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .dataset_response import DatasetResponse + + +class ListDatasets(UncheckedBaseModel): + records: typing.List[DatasetResponse] = pydantic.Field() + """ + The list of Datasets. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py new file mode 100644 index 00000000..4ec412cb --- /dev/null +++ b/src/humanloop/types/list_evaluators.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ListEvaluators(UncheckedBaseModel): + records: typing.List["EvaluatorResponse"] = pydantic.Field() + """ + The list of Evaluators. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(ListEvaluators) diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py new file mode 100644 index 00000000..ce407328 --- /dev/null +++ b/src/humanloop/types/list_flows.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ListFlows(UncheckedBaseModel): + records: typing.List["FlowResponse"] = pydantic.Field() + """ + The list of Flows. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(ListFlows) diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py new file mode 100644 index 00000000..42d01cf0 --- /dev/null +++ b/src/humanloop/types/list_prompts.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ListPrompts(UncheckedBaseModel): + records: typing.List["PromptResponse"] = pydantic.Field() + """ + The list of Prompts. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(ListPrompts) diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py new file mode 100644 index 00000000..7b2e7c70 --- /dev/null +++ b/src/humanloop/types/list_tools.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ListTools(UncheckedBaseModel): + records: typing.List["ToolResponse"] = pydantic.Field() + """ + The list of Tools. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(ListTools) diff --git a/src/humanloop/types/llm_evaluator_request.py b/src/humanloop/types/llm_evaluator_request.py new file mode 100644 index 00000000..c2061bfa --- /dev/null +++ b/src/humanloop/types/llm_evaluator_request.py @@ -0,0 +1,60 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluator_arguments_type import EvaluatorArgumentsType +from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit +from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse +from .evaluator_return_type_enum import EvaluatorReturnTypeEnum +from .prompt_kernel_request import PromptKernelRequest +from .valence import Valence + + +class LlmEvaluatorRequest(UncheckedBaseModel): + arguments_type: EvaluatorArgumentsType = pydantic.Field() + """ + Whether this Evaluator is target-free or target-required. + """ + + return_type: EvaluatorReturnTypeEnum = pydantic.Field() + """ + The type of the return value of the Evaluator. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Evaluator. Helpful to separate Evaluator versions from each other with details on how they were created or used. + """ + + options: typing.Optional[typing.List[EvaluatorJudgmentOptionResponse]] = pydantic.Field(default=None) + """ + The options that can be applied as judgments. Only for Evaluators with `return_type` of 'boolean', 'select' or 'multi_select'. + """ + + number_limits: typing.Optional[EvaluatorJudgmentNumberLimit] = pydantic.Field(default=None) + """ + Limits on the judgment that can be applied. Only for Evaluators with `return_type` of 'number'. + """ + + number_valence: typing.Optional[Valence] = pydantic.Field(default=None) + """ + The valence of the number judgment. Only for Evaluators with `return_type` of 'number'. If 'positive', a higher number is better. If 'negative', a lower number is better. + """ + + evaluator_type: typing.Literal["llm"] = "llm" + prompt: typing.Optional[PromptKernelRequest] = pydantic.Field(default=None) + """ + The prompt parameters used to generate. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py new file mode 100644 index 00000000..e6f60fcb --- /dev/null +++ b/src/humanloop/types/log_response.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from .agent_log_response import AgentLogResponse + from .evaluator_log_response import EvaluatorLogResponse + from .flow_log_response import FlowLogResponse + from .prompt_log_response import PromptLogResponse + from .tool_log_response import ToolLogResponse +LogResponse = typing.Union[ + "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse" +] diff --git a/src/humanloop/types/log_status.py b/src/humanloop/types/log_status.py new file mode 100644 index 00000000..a09d61c0 --- /dev/null +++ b/src/humanloop/types/log_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
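`log_response.py` above uses a second cycle-breaking device: the union's members are imported only under `typing.TYPE_CHECKING`, so no runtime import (and no import cycle) ever happens, while the union itself holds string forward references for type checkers. A standalone sketch of the same idea, with `decimal` standing in for a cycle-prone module:

    from __future__ import annotations

    import typing

    if typing.TYPE_CHECKING:
        # Type checkers see this import; at runtime it never executes.
        from decimal import Decimal

    NumberLike = typing.Union[int, float, "Decimal"]


    def halve(value: NumberLike) -> NumberLike:
        return value / 2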
+ +import typing + +LogStatus = typing.Union[typing.Literal["complete", "incomplete"], typing.Any] diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py new file mode 100644 index 00000000..2687e2ea --- /dev/null +++ b/src/humanloop/types/log_stream_response.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_log_stream_response import AgentLogStreamResponse +from .prompt_call_stream_response import PromptCallStreamResponse + +LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse] diff --git a/src/humanloop/types/model_endpoints.py b/src/humanloop/types/model_endpoints.py new file mode 100644 index 00000000..befb9936 --- /dev/null +++ b/src/humanloop/types/model_endpoints.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ModelEndpoints = typing.Union[typing.Literal["complete", "chat", "edit"], typing.Any] diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py new file mode 100644 index 00000000..3f2c99fb --- /dev/null +++ b/src/humanloop/types/model_providers.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ModelProviders = typing.Union[ + typing.Literal[ + "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate" + ], + typing.Any, +] diff --git a/src/humanloop/types/monitoring_evaluator_environment_request.py b/src/humanloop/types/monitoring_evaluator_environment_request.py new file mode 100644 index 00000000..cd3b8491 --- /dev/null +++ b/src/humanloop/types/monitoring_evaluator_environment_request.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class MonitoringEvaluatorEnvironmentRequest(UncheckedBaseModel): + evaluator_id: str = pydantic.Field() + """ + Unique identifier for the Evaluator to be used for monitoring. + """ + + environment_id: str = pydantic.Field() + """ + Unique identifier for the Environment. The Evaluator Version deployed to this Environment will be used for monitoring. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py new file mode 100644 index 00000000..1c08f955 --- /dev/null +++ b/src/humanloop/types/monitoring_evaluator_response.py @@ -0,0 +1,53 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .monitoring_evaluator_state import MonitoringEvaluatorState + + +class MonitoringEvaluatorResponse(UncheckedBaseModel): + version_reference: "VersionReferenceResponse" = pydantic.Field() + """ + The Evaluator Version used for monitoring. This can be a specific Version by ID, or a Version deployed to an Environment. 
+ """ + + version: typing.Optional["EvaluatorResponse"] = pydantic.Field(default=None) + """ + The deployed Version. + """ + + state: MonitoringEvaluatorState = pydantic.Field() + """ + The state of the Monitoring Evaluator. Either `active` or `inactive` + """ + + created_at: dt.datetime + updated_at: dt.datetime + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .version_reference_response import VersionReferenceResponse # noqa: E402, F401, I001 + +update_forward_refs(MonitoringEvaluatorResponse) diff --git a/src/humanloop/types/monitoring_evaluator_state.py b/src/humanloop/types/monitoring_evaluator_state.py new file mode 100644 index 00000000..550c0fdb --- /dev/null +++ b/src/humanloop/types/monitoring_evaluator_state.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +MonitoringEvaluatorState = typing.Union[typing.Literal["active", "inactive"], typing.Any] diff --git a/src/humanloop/types/monitoring_evaluator_version_request.py b/src/humanloop/types/monitoring_evaluator_version_request.py new file mode 100644 index 00000000..8adfb290 --- /dev/null +++ b/src/humanloop/types/monitoring_evaluator_version_request.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class MonitoringEvaluatorVersionRequest(UncheckedBaseModel): + evaluator_version_id: str = pydantic.Field() + """ + Unique identifier for the Evaluator Version to be used for monitoring. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/numeric_evaluator_stats_response.py b/src/humanloop/types/numeric_evaluator_stats_response.py new file mode 100644 index 00000000..6bed0547 --- /dev/null +++ b/src/humanloop/types/numeric_evaluator_stats_response.py @@ -0,0 +1,53 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class NumericEvaluatorStatsResponse(UncheckedBaseModel): + """ + Base attributes for stats for an Evaluator Version-Evaluated Version pair + in the Evaluation. + """ + + evaluator_version_id: str = pydantic.Field() + """ + Unique identifier for the Evaluator Version. 
+ """ + + total_logs: int = pydantic.Field() + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int = pydantic.Field() + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int = pydantic.Field() + """ + The total number of null judgments (i.e. abstentions) for this Evaluator Version. + """ + + num_errors: int = pydantic.Field() + """ + The total number of errored Evaluators for this Evaluator Version. + """ + + mean: typing.Optional[float] = None + sum: typing.Optional[float] = None + std: typing.Optional[float] = None + percentiles: typing.Dict[str, float] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/observability_status.py b/src/humanloop/types/observability_status.py new file mode 100644 index 00000000..a6b6ad71 --- /dev/null +++ b/src/humanloop/types/observability_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ObservabilityStatus = typing.Union[typing.Literal["pending", "running", "completed", "failed"], typing.Any] diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py new file mode 100644 index 00000000..3730256e --- /dev/null +++ b/src/humanloop/types/on_agent_call_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any] diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py new file mode 100644 index 00000000..d8c48547 --- /dev/null +++ b/src/humanloop/types/open_ai_reasoning_effort.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any] diff --git a/src/humanloop/types/overall_stats.py b/src/humanloop/types/overall_stats.py new file mode 100644 index 00000000..d00145b9 --- /dev/null +++ b/src/humanloop/types/overall_stats.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class OverallStats(UncheckedBaseModel): + num_datapoints: int = pydantic.Field() + """ + The total number of Datapoints in the Evaluation's Dataset Version. + """ + + total_logs: int = pydantic.Field() + """ + The total number of Logs in the Evaluation. + """ + + total_evaluator_logs: int = pydantic.Field() + """ + The total number of Evaluator Logs in the Evaluation. + """ + + total_human_evaluator_logs: int = pydantic.Field() + """ + The total number of human Evaluator Logs in the Evaluation Report. + """ + + total_completed_human_evaluator_logs: int = pydantic.Field() + """ + The total number of non-None human Evaluator Logs in the Evaluation Report. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py new file mode 100644 index 00000000..ecc67072 --- /dev/null +++ b/src/humanloop/types/paginated_data_agent_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class PaginatedDataAgentResponse(UncheckedBaseModel): + records: typing.List["AgentResponse"] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataAgentResponse) diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py new file mode 100644 index 00000000..17baff0c --- /dev/null +++ b/src/humanloop/types/paginated_data_evaluation_log_response.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_log_response import EvaluationLogResponse + + +class PaginatedDataEvaluationLogResponse(UncheckedBaseModel): + records: typing.List[EvaluationLogResponse] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataEvaluationLogResponse) diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py new file mode 100644 index 00000000..47a835e6 --- /dev/null +++ b/src/humanloop/types/paginated_data_evaluator_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. 
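+# NOTE: an illustrative sketch, not generated by Fern. The paginated models in
+# this package annotate `records` with a string forward reference (e.g.
+# typing.List["EvaluatorResponse"]), defer the matching import to the bottom of
+# the module, and then call update_forward_refs(...) to resolve the string.
+# This breaks the import cycles between mutually referencing response models.
+# A minimal version of the same pattern:
+#
+#     class Node(UncheckedBaseModel):
+#         children: typing.List["Node"]  # string annotation, resolved later
+#
+#     update_forward_refs(Node)  # binds "Node" to the real class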
+ +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class PaginatedDataEvaluatorResponse(UncheckedBaseModel): + records: typing.List["EvaluatorResponse"] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataEvaluatorResponse) diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py new file mode 100644 index 00000000..2775ec74 --- /dev/null +++ b/src/humanloop/types/paginated_data_flow_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class PaginatedDataFlowResponse(UncheckedBaseModel): + records: typing.List["FlowResponse"] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataFlowResponse) diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py new file mode 100644 index 00000000..1354146d --- /dev/null +++ b/src/humanloop/types/paginated_data_log_response.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. 
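+# NOTE: an illustrative restatement, not generated by Fern. Each model in this
+# package carries the same dual configuration so one codebase supports both
+# Pydantic v1 and v2; only the spelling differs between the two versions:
+#
+#     if IS_PYDANTIC_V2:
+#         model_config = pydantic.ConfigDict(extra="allow", frozen=True)  # v2 style
+#     else:
+#         class Config:  # v1 style
+#             frozen = True  # instances are immutable
+#             smart_union = True  # better Union member selection on v1
+#             extra = pydantic.Extra.allow  # keep unknown fields from newer servers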
+ +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class PaginatedDataLogResponse(UncheckedBaseModel): + records: typing.List["LogResponse"] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataLogResponse) diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py new file mode 100644 index 00000000..4487f88f --- /dev/null +++ b/src/humanloop/types/paginated_data_prompt_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. 
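+# NOTE: an observation, not generated by Fern. These models extend
+# UncheckedBaseModel rather than pydantic.BaseModel directly; together with
+# extra="allow" and the `typing.Any` arm in the Literal unions, this appears
+# designed to parse responses leniently, so fields or enum values added to the
+# API later do not break an older SDK.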
+ +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class PaginatedDataPromptResponse(UncheckedBaseModel): + records: typing.List["PromptResponse"] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataPromptResponse) diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py new file mode 100644 index 00000000..131ddb69 --- /dev/null +++ b/src/humanloop/types/paginated_data_tool_response.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class PaginatedDataToolResponse(UncheckedBaseModel): + records: typing.List["ToolResponse"] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedDataToolResponse) diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py new file mode 100644 index 00000000..f6ee4be8 --- /dev/null +++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py @@ -0,0 +1,47 @@ +# This file was 
auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, +) + + +class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse( + UncheckedBaseModel +): + records: typing.List[ + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem + ] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse +) diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py new file mode 100644 index 00000000..ee28a684 --- /dev/null +++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_response import AgentResponse +from .dataset_response import DatasetResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse + +PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = ( + typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse] +) diff --git a/src/humanloop/types/paginated_datapoint_response.py b/src/humanloop/types/paginated_datapoint_response.py new file mode 100644 index 00000000..c82aa987 --- /dev/null +++ b/src/humanloop/types/paginated_datapoint_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
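+# NOTE: an illustrative sketch, not generated by Fern. Because the
+# `...RecordsItem` union above admits any File type, consumers narrow each
+# record before using type-specific fields:
+#
+#     for record in page.records:
+#         if isinstance(record, PromptResponse):
+#             print("prompt:", record.path)
+#         elif isinstance(record, DatasetResponse):
+#             print("dataset:", record.path)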
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .datapoint_response import DatapointResponse + + +class PaginatedDatapointResponse(UncheckedBaseModel): + records: typing.List[DatapointResponse] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_dataset_response.py b/src/humanloop/types/paginated_dataset_response.py new file mode 100644 index 00000000..689c7276 --- /dev/null +++ b/src/humanloop/types/paginated_dataset_response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .dataset_response import DatasetResponse + + +class PaginatedDatasetResponse(UncheckedBaseModel): + records: typing.List[DatasetResponse] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py new file mode 100644 index 00000000..b9efe745 --- /dev/null +++ b/src/humanloop/types/paginated_evaluation_response.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_response import EvaluationResponse + + +class PaginatedEvaluationResponse(UncheckedBaseModel): + records: typing.List[EvaluationResponse] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PaginatedEvaluationResponse) diff --git a/src/humanloop/types/paginated_prompt_log_response.py b/src/humanloop/types/paginated_prompt_log_response.py new file mode 100644 index 00000000..50dd56aa --- /dev/null +++ b/src/humanloop/types/paginated_prompt_log_response.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
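+# NOTE: an observation, not generated by Fern. Unlike the typed models above,
+# this alias and PaginatedSessionResponse below collapse to
+# typing.Optional[typing.Any]; the API definition presumably no longer describes
+# their shape, so the generator emits an opaque placeholder instead of a model.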
+ +import typing + +PaginatedPromptLogResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/paginated_session_response.py b/src/humanloop/types/paginated_session_response.py new file mode 100644 index 00000000..aa0bfbca --- /dev/null +++ b/src/humanloop/types/paginated_session_response.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PaginatedSessionResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/platform_access_enum.py b/src/humanloop/types/platform_access_enum.py new file mode 100644 index 00000000..56da571b --- /dev/null +++ b/src/humanloop/types/platform_access_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PlatformAccessEnum = typing.Union[typing.Literal["superadmin", "supportadmin", "user"], typing.Any] diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py new file mode 100644 index 00000000..b27a3a90 --- /dev/null +++ b/src/humanloop/types/populate_template_response.py @@ -0,0 +1,258 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_response import EnvironmentResponse +from .evaluator_aggregate import EvaluatorAggregate +from .input_response import InputResponse +from .linked_tool_response import LinkedToolResponse +from .model_endpoints import ModelEndpoints +from .model_providers import ModelProviders +from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort +from .populate_template_response_stop import PopulateTemplateResponseStop +from .populate_template_response_template import PopulateTemplateResponseTemplate +from .response_format import ResponseFormat +from .template_language import TemplateLanguage +from .tool_function import ToolFunction +from .user_response import UserResponse + + +class PopulateTemplateResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str = pydantic.Field() + """ + Path of the Prompt, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Prompt. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[PopulateTemplateResponseTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. 
+
+    For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+    For completion models, provide a prompt template as a string.
+
+    Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+    """
+
+    template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+    """
+    The template language to use for rendering the template.
+    """
+
+    provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+    """
+    The company providing the underlying model service.
+    """
+
+    max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing.Optional[PopulateTemplateResponseStop] = pydantic.Field(default=None)
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+    """
+
+    frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+    """
+
+    other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Other parameter values to be passed to the provider call.
+    """
+
+    seed: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+    """
+
+    response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+    """
+    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+    """
+
+    reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None)
+    """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+    """
+
+    tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
+    """
+    The tool specification that the model can choose to call if Tool calling is supported.
+    """
+
+    linked_tools: typing.Optional[typing.List[LinkedToolResponse]] = pydantic.Field(default=None)
+    """
+    The tools linked to your prompt that the model can call.
+    """
+
+    attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+    """
+
+    version_name: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Unique name for the Prompt version. Version names must be unique for a given Prompt.
+    """
+
+    version_description: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Description of the version, e.g., the changes made in this version.
+    """
+
+    description: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Description of the Prompt.
+    """
+
+    tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    List of tags associated with the file.
+    """
+
+    readme: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Long description of the file.
+    """
+
+    name: str = pydantic.Field()
+    """
+    Name of the Prompt.
+    """
+
+    schema_: typing_extensions.Annotated[
+        typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+    ] = pydantic.Field(default=None)
+    """
+    The JSON schema for the Prompt.
+    """
+
+    version_id: str = pydantic.Field()
+    """
+    Unique identifier for the specific Prompt Version. If no query params are provided, the default deployed Prompt Version is returned.
+    """
+
+    type: typing.Optional[typing.Literal["prompt"]] = None
+    environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
+    """
+    The list of environments the Prompt Version is deployed to.
+    """
+
+    created_at: dt.datetime
+    updated_at: dt.datetime
+    created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+    """
+    The user who created the Prompt.
+    """
+
+    last_used_at: dt.datetime
+    version_logs_count: int = pydantic.Field()
+    """
+    The number of logs that have been generated for this Prompt Version.
+    """
+
+    total_logs_count: int = pydantic.Field()
+    """
+    The number of logs that have been generated across all Prompt Versions.
+    """
+
+    inputs: typing.List[InputResponse] = pydantic.Field()
+    """
+    Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template.
+    """
+
+    evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
+    """
+    Evaluators that have been attached to this Prompt that are used for monitoring logs.
+    """
+
+    evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None)
+    """
+    Aggregation of Evaluator results for the Prompt Version.
+    """
+
+    raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The raw content of the Prompt. Corresponds to the .prompt file.
+    """
+
+    populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
+    """
+    The template populated with the input values you provided in the request. Returns None if no template exists.
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PopulateTemplateResponse) diff --git a/src/humanloop/types/populate_template_response_populated_template.py b/src/humanloop/types/populate_template_response_populated_template.py new file mode 100644 index 00000000..21a714b9 --- /dev/null +++ b/src/humanloop/types/populate_template_response_populated_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessage + +PopulateTemplateResponsePopulatedTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py new file mode 100644 index 00000000..af02db55 --- /dev/null +++ b/src/humanloop/types/populate_template_response_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/populate_template_response_stop.py b/src/humanloop/types/populate_template_response_stop.py new file mode 100644 index 00000000..5613c780 --- /dev/null +++ b/src/humanloop/types/populate_template_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PopulateTemplateResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/populate_template_response_template.py b/src/humanloop/types/populate_template_response_template.py new file mode 100644 index 00000000..d3b10e2e --- /dev/null +++ b/src/humanloop/types/populate_template_response_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .chat_message import ChatMessage + +PopulateTemplateResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/prompt_call_log_response.py b/src/humanloop/types/prompt_call_log_response.py new file mode 100644 index 00000000..2ec71bf1 --- /dev/null +++ b/src/humanloop/types/prompt_call_log_response.py @@ -0,0 +1,89 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage + + +class PromptCallLogResponse(UncheckedBaseModel): + """ + Sample specific response details for a Prompt call + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + index: int = pydantic.Field() + """ + The index of the sample in the batch. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py new file mode 100644 index 00000000..f20ce5f6 --- /dev/null +++ b/src/humanloop/types/prompt_call_response.py @@ -0,0 +1,136 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage +from .log_status import LogStatus +from .prompt_call_log_response import PromptCallLogResponse +from .prompt_call_response_tool_choice import PromptCallResponseToolChoice + + +class PromptCallResponse(UncheckedBaseModel): + """ + Response model for a Prompt call with potentially multiple log samples. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. 
+ """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing.Optional[PromptCallResponseToolChoice] = pydantic.Field(default=None) + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + prompt: "PromptResponse" = pydantic.Field() + """ + Prompt used to generate the Log. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the Trace containing the Prompt Call Log. + """ + + logs: typing.List[PromptCallLogResponse] = pydantic.Field() + """ + The logs generated by the Prompt call. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PromptCallResponse) diff --git a/src/humanloop/types/prompt_call_response_tool_choice.py b/src/humanloop/types/prompt_call_response_tool_choice.py new file mode 100644 index 00000000..7cb07ccc --- /dev/null +++ b/src/humanloop/types/prompt_call_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoice + +PromptCallResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/prompt_call_stream_response.py b/src/humanloop/types/prompt_call_stream_response.py new file mode 100644 index 00000000..48fffdee --- /dev/null +++ b/src/humanloop/types/prompt_call_stream_response.py @@ -0,0 +1,104 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage + + +class PromptCallStreamResponse(UncheckedBaseModel): + """ + Response model for calling Prompt in streaming mode. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. 
+ """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + index: int = pydantic.Field() + """ + The index of the sample in the batch. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + prompt_id: str = pydantic.Field() + """ + ID of the Prompt the log belongs to. + """ + + version_id: str = pydantic.Field() + """ + ID of the specific version of the Prompt. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py new file mode 100644 index 00000000..03e5c624 --- /dev/null +++ b/src/humanloop/types/prompt_kernel_request.py @@ -0,0 +1,127 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .model_endpoints import ModelEndpoints +from .model_providers import ModelProviders +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort +from .prompt_kernel_request_stop import PromptKernelRequestStop +from .prompt_kernel_request_template import PromptKernelRequestTemplate +from .response_format import ResponseFormat +from .template_language import TemplateLanguage +from .tool_function import ToolFunction + + +class PromptKernelRequest(UncheckedBaseModel): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[PromptKernelRequestTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) + """ + The template language to use for rendering the template. + """ + + provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) + """ + The company providing the underlying model service. + """ + + max_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of tokens to generate. 
Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing.Optional[PromptKernelRequestStop] = pydantic.Field(default=None)
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+    """
+
+    frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+    """
+
+    other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Other parameter values to be passed to the provider call.
+    """
+
+    seed: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+    """
+
+    response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+    """
+    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+    """
+
+    reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None)
+    """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+    """
+
+    tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
+    """
+    The tool specification that the model can choose to call if Tool calling is supported.
+    """
+
+    linked_tools: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called.
+    """
+
+    attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..b5fb8879
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
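+# NOTE: an illustrative sketch, not generated by Fern. The union below matches
+# the `reasoning_effort` docstring above: OpenAI reasoning models take an
+# OpenAiReasoningEffort value, Anthropic reasoning models take a token budget
+# (the model names here are hypothetical examples):
+#
+#     PromptKernelRequest(model="o1", reasoning_effort="medium")  # OpenAI effort level
+#     PromptKernelRequest(model="claude-3-7-sonnet", reasoning_effort=2048)  # Anthropic budget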
+
+import typing
+
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_kernel_request_stop.py b/src/humanloop/types/prompt_kernel_request_stop.py
new file mode 100644
index 00000000..a4e55eb9
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/prompt_kernel_request_template.py b/src/humanloop/types/prompt_kernel_request_template.py
new file mode 100644
index 00000000..59cf99d3
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_template.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .chat_message import ChatMessage
+
+PromptKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
new file mode 100644
index 00000000..8bea9781
--- /dev/null
+++ b/src/humanloop/types/prompt_log_response.py
@@ -0,0 +1,225 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message import ChatMessage
+from .log_status import LogStatus
+from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
+
+
+class PromptLogResponse(UncheckedBaseModel):
+    """
+    General request for creating a Log
+    """
+
+    output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Cost in dollars associated to the tokens in the prompt.
+    """
+
+    output_cost: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Cost in dollars associated to the tokens in the output.
+    """
+
+    finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Reason the generation finished.
+    """
+
+    messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing.Optional[PromptLogResponseToolChoice] = pydantic.Field(default=None)
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    prompt: "PromptResponse" = pydantic.Field()
+    """
+    Prompt used to generate the Log.
+    """
+
+    start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event started.
+    """
+
+    end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event ended.
+    """
+
+    output: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+    """
+    Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+    """
+
+    source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+    """
+
+    user: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The name of the Environment the Log is associated to.
+    """
+
+    save: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(PromptLogResponse) diff --git a/src/humanloop/types/prompt_log_response_tool_choice.py b/src/humanloop/types/prompt_log_response_tool_choice.py new file mode 100644 index 00000000..e7acf4bb --- /dev/null +++ b/src/humanloop/types/prompt_log_response_tool_choice.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .tool_choice import ToolChoice + +PromptLogResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py new file mode 100644 index 00000000..1a2b1490 --- /dev/null +++ b/src/humanloop/types/prompt_response.py @@ -0,0 +1,251 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_response import EnvironmentResponse +from .evaluator_aggregate import EvaluatorAggregate +from .input_response import InputResponse +from .linked_tool_response import LinkedToolResponse +from .model_endpoints import ModelEndpoints +from .model_providers import ModelProviders +from .prompt_response_reasoning_effort import PromptResponseReasoningEffort +from .prompt_response_stop import PromptResponseStop +from .prompt_response_template import PromptResponseTemplate +from .response_format import ResponseFormat +from .template_language import TemplateLanguage +from .tool_function import ToolFunction +from .user_response import UserResponse + + +class PromptResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str = pydantic.Field() + """ + Path of the Prompt, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Prompt. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models). + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[PromptResponseTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) + """ + The template language to use for rendering the template. + """ + + provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) + """ + The company providing the underlying model service. + """ + + max_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + """ + + top_p: typing.Optional[float] = pydantic.Field(default=None) + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing.Optional[PromptResponseStop] = pydantic.Field(default=None) + """ + The string (or list of strings) after which the model will stop generating.
The returned text will not contain the stop sequence. + """ + + presence_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing.Optional[int] = pydantic.Field(default=None) + """ + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None) + """ + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) + """ + The tool specification that the model can choose to call if Tool calling is supported. + """ + + linked_tools: typing.Optional[typing.List[LinkedToolResponse]] = pydantic.Field(default=None) + """ + The tools linked to your prompt that the model can call. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Prompt version. Version names must be unique for a given Prompt. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Prompt. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + name: str = pydantic.Field() + """ + Name of the Prompt. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the Prompt. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Prompt Version. If no query params are provided, the default deployed Prompt Version is returned. + """ + + type: typing.Optional[typing.Literal["prompt"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Prompt Version is deployed to.
+ """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Prompt. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Prompt Version + """ + + total_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated across all Prompt Versions + """ + + inputs: typing.List[InputResponse] = pydantic.Field() + """ + Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + Evaluators that have been attached to this Prompt that are used for monitoring logs. + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Prompt Version. + """ + + raw_file_content: typing.Optional[str] = pydantic.Field(default=None) + """ + The raw content of the Prompt. Corresponds to the .prompt file. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(PromptResponse) diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py new file mode 100644 index 00000000..86e9e7ad --- /dev/null +++ b/src/humanloop/types/prompt_response_reasoning_effort.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/prompt_response_stop.py b/src/humanloop/types/prompt_response_stop.py new file mode 100644 index 00000000..664140be --- /dev/null +++ b/src/humanloop/types/prompt_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PromptResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/prompt_response_template.py b/src/humanloop/types/prompt_response_template.py new file mode 100644 index 00000000..8a89bc04 --- /dev/null +++ b/src/humanloop/types/prompt_response_template.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from .chat_message import ChatMessage + +PromptResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/provider_api_keys.py b/src/humanloop/types/provider_api_keys.py new file mode 100644 index 00000000..49bf8731 --- /dev/null +++ b/src/humanloop/types/provider_api_keys.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.serialization import FieldMetadata +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ProviderApiKeys(UncheckedBaseModel): + openai: typing.Optional[str] = None + ai_21: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="ai21")] = None + mock: typing.Optional[str] = None + anthropic: typing.Optional[str] = None + deepseek: typing.Optional[str] = None + bedrock: typing.Optional[str] = None + cohere: typing.Optional[str] = None + openai_azure: typing.Optional[str] = None + openai_azure_endpoint: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/response_format.py b/src/humanloop/types/response_format.py new file mode 100644 index 00000000..287019c4 --- /dev/null +++ b/src/humanloop/types/response_format.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .response_format_type import ResponseFormatType + + +class ResponseFormat(UncheckedBaseModel): + """ + Response format of the model. + """ + + type: ResponseFormatType + json_schema: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The JSON schema of the response format if type is json_schema. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/response_format_type.py b/src/humanloop/types/response_format_type.py new file mode 100644 index 00000000..3ecf0a30 --- /dev/null +++ b/src/humanloop/types/response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ResponseFormatType = typing.Union[typing.Literal["json_object", "json_schema"], typing.Any] diff --git a/src/humanloop/types/run_stats_response.py b/src/humanloop/types/run_stats_response.py new file mode 100644 index 00000000..3e385b26 --- /dev/null +++ b/src/humanloop/types/run_stats_response.py @@ -0,0 +1,54 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .evaluation_status import EvaluationStatus +from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItem + + +class RunStatsResponse(UncheckedBaseModel): + """ + Stats for a Run in the Evaluation. + """ + + run_id: str = pydantic.Field() + """ + Unique identifier for the Run. 
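The `ResponseFormat` model above distinguishes plain JSON mode from schema-constrained output via `ResponseFormatType`. A minimal sketch, assuming the generated package is importable (the schema contents are illustrative):

from humanloop.types.response_format import ResponseFormat

json_mode = ResponseFormat(type="json_object")  # plain JSON mode

structured = ResponseFormat(
    type="json_schema",
    json_schema={  # free-form dict; only consulted when type is "json_schema"
        "name": "city_info",
        "schema": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
)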
+ """ + + version_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the evaluated Version. + """ + + batch_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + num_logs: int = pydantic.Field() + """ + The total number of existing Logs in this Run. + """ + + evaluator_stats: typing.List[RunStatsResponseEvaluatorStatsItem] = pydantic.Field() + """ + Stats for each Evaluator Version applied to this Run. + """ + + status: EvaluationStatus = pydantic.Field() + """ + The current status of the Run. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/run_stats_response_evaluator_stats_item.py b/src/humanloop/types/run_stats_response_evaluator_stats_item.py new file mode 100644 index 00000000..697efb12 --- /dev/null +++ b/src/humanloop/types/run_stats_response_evaluator_stats_item.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse +from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse +from .select_evaluator_stats_response import SelectEvaluatorStatsResponse +from .text_evaluator_stats_response import TextEvaluatorStatsResponse + +RunStatsResponseEvaluatorStatsItem = typing.Union[ + NumericEvaluatorStatsResponse, + BooleanEvaluatorStatsResponse, + SelectEvaluatorStatsResponse, + TextEvaluatorStatsResponse, +] diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py new file mode 100644 index 00000000..703bea5f --- /dev/null +++ b/src/humanloop/types/run_version_response.py @@ -0,0 +1,11 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse + +RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse] diff --git a/src/humanloop/types/select_evaluator_stats_response.py b/src/humanloop/types/select_evaluator_stats_response.py new file mode 100644 index 00000000..14068fef --- /dev/null +++ b/src/humanloop/types/select_evaluator_stats_response.py @@ -0,0 +1,52 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class SelectEvaluatorStatsResponse(UncheckedBaseModel): + """ + Also used for 'multi_select' Evaluator versions + """ + + evaluator_version_id: str = pydantic.Field() + """ + Unique identifier for the Evaluator Version. + """ + + total_logs: int = pydantic.Field() + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int = pydantic.Field() + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int = pydantic.Field() + """ + The total number of null judgments (i.e. 
abstentions) for this Evaluator Version. + """ + + num_errors: int = pydantic.Field() + """ + The total number of errored Evaluators for this Evaluator Version. + """ + + num_judgments_per_option: typing.Dict[str, int] = pydantic.Field() + """ + The total number of Evaluator judgments for this Evaluator Version. This is a mapping of the option name to the number of judgments for that option. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/sort_order.py b/src/humanloop/types/sort_order.py new file mode 100644 index 00000000..6dc54931 --- /dev/null +++ b/src/humanloop/types/sort_order.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SortOrder = typing.Union[typing.Literal["asc", "desc"], typing.Any] diff --git a/src/humanloop/types/template_language.py b/src/humanloop/types/template_language.py new file mode 100644 index 00000000..4b464b79 --- /dev/null +++ b/src/humanloop/types/template_language.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TemplateLanguage = typing.Union[typing.Literal["default", "jinja"], typing.Any] diff --git a/src/humanloop/types/text_chat_content.py b/src/humanloop/types/text_chat_content.py new file mode 100644 index 00000000..0db5d057 --- /dev/null +++ b/src/humanloop/types/text_chat_content.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class TextChatContent(UncheckedBaseModel): + type: typing.Literal["text"] = "text" + text: str = pydantic.Field() + """ + The message's text content. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/text_evaluator_stats_response.py b/src/humanloop/types/text_evaluator_stats_response.py new file mode 100644 index 00000000..bb38f996 --- /dev/null +++ b/src/humanloop/types/text_evaluator_stats_response.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class TextEvaluatorStatsResponse(UncheckedBaseModel): + """ + Base attributes for stats for an Evaluator Version-Evaluated Version pair + in the Evaluation. + """ + + evaluator_version_id: str = pydantic.Field() + """ + Unique identifier for the Evaluator Version. + """ + + total_logs: int = pydantic.Field() + """ + The total number of Logs generated by this Evaluator Version on the Evaluated Version's Logs. This includes Nulls and Errors. + """ + + num_judgments: int = pydantic.Field() + """ + The total number of Evaluator judgments for this Evaluator Version. This excludes Nulls and Errors. + """ + + num_nulls: int = pydantic.Field() + """ + The total number of null judgments (i.e. abstentions) for this Evaluator Version. 
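These stats models expose raw counts only; rates have to be derived by the caller. A sketch of the kind of post-processing they support, given a `SelectEvaluatorStatsResponse` named `stats` (the helper functions are hypothetical, not part of the SDK):

def abstention_rate(stats) -> float:
    # Fraction of this Evaluator Version's Logs that produced no judgment.
    return stats.num_nulls / stats.total_logs if stats.total_logs else 0.0

def option_shares(stats) -> dict:
    # Share of judgments per option; num_judgments excludes nulls and errors.
    total = stats.num_judgments or 1
    return {option: count / total for option, count in stats.num_judgments_per_option.items()}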
+ """ + + num_errors: int = pydantic.Field() + """ + The total number of errored Evaluators for this Evaluator Version. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/time_unit.py b/src/humanloop/types/time_unit.py new file mode 100644 index 00000000..57454139 --- /dev/null +++ b/src/humanloop/types/time_unit.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TimeUnit = typing.Union[typing.Literal["day", "week", "month"], typing.Any] diff --git a/src/humanloop/types/tool_call.py b/src/humanloop/types/tool_call.py new file mode 100644 index 00000000..11c9d7e9 --- /dev/null +++ b/src/humanloop/types/tool_call.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_tool_type import ChatToolType +from .function_tool import FunctionTool + + +class ToolCall(UncheckedBaseModel): + """ + A tool call to be made. + """ + + id: str + type: ChatToolType = "function" + function: FunctionTool + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py new file mode 100644 index 00000000..d3b660e1 --- /dev/null +++ b/src/humanloop/types/tool_call_response.py @@ -0,0 +1,175 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .log_status import LogStatus + + +class ToolCallResponse(UncheckedBaseModel): + """ + Response model for a Tool call. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + tool: "ToolResponse" = pydantic.Field() + """ + Tool used to generate the Log. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. 
+ """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the Trace containing the Tool Call Log. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(ToolCallResponse) diff --git a/src/humanloop/types/tool_choice.py b/src/humanloop/types/tool_choice.py new file mode 100644 index 00000000..fad59550 --- /dev/null +++ b/src/humanloop/types/tool_choice.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_tool_type import ChatToolType +from .function_tool_choice import FunctionToolChoice + + +class ToolChoice(UncheckedBaseModel): + """ + Tool choice to force the model to use a tool. + """ + + type: ChatToolType = "function" + function: FunctionToolChoice + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_function.py b/src/humanloop/types/tool_function.py new file mode 100644 index 00000000..e0a29165 --- /dev/null +++ b/src/humanloop/types/tool_function.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ToolFunction(UncheckedBaseModel): + name: str = pydantic.Field() + """ + Name for the tool referenced by the model. + """ + + description: str = pydantic.Field() + """ + Description of the tool referenced by the model + """ + + strict: typing.Optional[bool] = pydantic.Field(default=None) + """ + If true, forces the model to output json data in the structure of the parameters schema. 
+ """ + + parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Parameters needed to run the Tool, defined in JSON Schema format: https://json-schema.org/ + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_kernel_request.py b/src/humanloop/types/tool_kernel_request.py new file mode 100644 index 00000000..3e2d4afe --- /dev/null +++ b/src/humanloop/types/tool_kernel_request.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .tool_function import ToolFunction + + +class ToolKernelRequest(UncheckedBaseModel): + function: typing.Optional[ToolFunction] = pydantic.Field(default=None) + """ + Callable function specification of the Tool shown to the model for tool calling. + """ + + source_code: typing.Optional[str] = pydantic.Field(default=None) + """ + Code source of the Tool. + """ + + setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py new file mode 100644 index 00000000..2524eb5b --- /dev/null +++ b/src/humanloop/types/tool_log_response.py @@ -0,0 +1,180 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .chat_message import ChatMessage +from .log_status import LogStatus + + +class ToolLogResponse(UncheckedBaseModel): + """ + General request for creating a Log + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. 
+ """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + tool: "ToolResponse" = pydantic.Field() + """ + Tool used to generate the Log. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the Tool. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .log_response import LogResponse # noqa: E402, F401, I001 + +update_forward_refs(ToolLogResponse) diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py new file mode 100644 index 00000000..b2bca04b --- /dev/null +++ b/src/humanloop/types/tool_response.py @@ -0,0 +1,165 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_response import EnvironmentResponse +from .evaluator_aggregate import EvaluatorAggregate +from .files_tool_type import FilesToolType +from .input_response import InputResponse +from .tool_function import ToolFunction +from .user_response import UserResponse + + +class ToolResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str = pydantic.Field() + """ + Path of the Tool, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Tool. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + function: typing.Optional[ToolFunction] = pydantic.Field(default=None) + """ + Callable function specification of the Tool shown to the model for tool calling. + """ + + source_code: typing.Optional[str] = pydantic.Field(default=None) + """ + Code source of the Tool. + """ + + setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. 
+ """ + + tool_type: typing.Optional[FilesToolType] = pydantic.Field(default=None) + """ + Type of Tool. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Version. + """ + + name: str = pydantic.Field() + """ + Name of the Tool, which is used as a unique identifier. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Tool. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Tool Version. If no query params provided, the default deployed Tool Version is returned. + """ + + type: typing.Optional[typing.Literal["tool"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Tool Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Tool. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Tool Version + """ + + total_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated across all Tool Versions + """ + + inputs: typing.List[InputResponse] = pydantic.Field() + """ + Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Tool template. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + Evaluators that have been attached to this Tool that are used for monitoring logs. + """ + + signature: typing.Optional[str] = pydantic.Field(default=None) + """ + Signature of the Tool. + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Tool Version. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 + +update_forward_refs(ToolResponse) diff --git a/src/humanloop/types/update_dateset_action.py b/src/humanloop/types/update_dateset_action.py new file mode 100644 index 00000000..d5264274 --- /dev/null +++ b/src/humanloop/types/update_dateset_action.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +UpdateDatesetAction = typing.Union[typing.Literal["set", "add", "remove"], typing.Any] diff --git a/src/humanloop/types/update_evaluation_status_request.py b/src/humanloop/types/update_evaluation_status_request.py new file mode 100644 index 00000000..cb507b69 --- /dev/null +++ b/src/humanloop/types/update_evaluation_status_request.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +UpdateEvaluationStatusRequest = typing.Optional[typing.Any] diff --git a/src/humanloop/types/update_version_request.py b/src/humanloop/types/update_version_request.py new file mode 100644 index 00000000..0587c889 --- /dev/null +++ b/src/humanloop/types/update_version_request.py @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class UpdateVersionRequest(UncheckedBaseModel): + name: typing.Optional[str] = pydantic.Field(default=None) + """ + Name of the version. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/user_response.py b/src/humanloop/types/user_response.py new file mode 100644 index 00000000..5f41c81d --- /dev/null +++ b/src/humanloop/types/user_response.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +UserResponse = typing.Optional[typing.Any] diff --git a/src/humanloop/types/valence.py b/src/humanloop/types/valence.py new file mode 100644 index 00000000..4779efc2 --- /dev/null +++ b/src/humanloop/types/valence.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +Valence = typing.Union[typing.Literal["positive", "negative", "neutral"], typing.Any] diff --git a/src/humanloop/types/validation_error.py b/src/humanloop/types/validation_error.py new file mode 100644 index 00000000..0438bc05 --- /dev/null +++ b/src/humanloop/types/validation_error.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .validation_error_loc_item import ValidationErrorLocItem + + +class ValidationError(UncheckedBaseModel): + loc: typing.List[ValidationErrorLocItem] + msg: str + type: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/validation_error_loc_item.py b/src/humanloop/types/validation_error_loc_item.py new file mode 100644 index 00000000..9a0a83fe --- /dev/null +++ b/src/humanloop/types/validation_error_loc_item.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ValidationErrorLocItem = typing.Union[str, int] diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py new file mode 100644 index 00000000..fdee59de --- /dev/null +++ b/src/humanloop/types/version_deployment_response.py @@ -0,0 +1,50 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel +from .environment_response import EnvironmentResponse + + +class VersionDeploymentResponse(UncheckedBaseModel): + """ + A variable reference to the Version deployed to an Environment + """ + + file: "VersionDeploymentResponseFile" = pydantic.Field() + """ + The File that the deployed Version belongs to. + """ + + environment: EnvironmentResponse = pydantic.Field() + """ + The Environment that the Version is deployed to. 
+ """ + + type: typing.Literal["environment"] = "environment" + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_id_response import VersionIdResponse # noqa: E402, F401, I001 +from .version_deployment_response_file import VersionDeploymentResponseFile # noqa: E402, F401, I001 + +update_forward_refs(VersionDeploymentResponse) diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py new file mode 100644 index 00000000..130f2c1c --- /dev/null +++ b/src/humanloop/types/version_deployment_response_file.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from .dataset_response import DatasetResponse + +if typing.TYPE_CHECKING: + from .agent_response import AgentResponse + from .evaluator_response import EvaluatorResponse + from .flow_response import FlowResponse + from .prompt_response import PromptResponse + from .tool_response import ToolResponse +VersionDeploymentResponseFile = typing.Union[ + "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" +] diff --git a/src/humanloop/types/version_id.py b/src/humanloop/types/version_id.py new file mode 100644 index 00000000..51de3db1 --- /dev/null +++ b/src/humanloop/types/version_id.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class VersionId(UncheckedBaseModel): + version_id: str = pydantic.Field() + """ + Unique identifier for the Version. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py new file mode 100644 index 00000000..47aa53db --- /dev/null +++ b/src/humanloop/types/version_id_response.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs +from ..core.unchecked_base_model import UncheckedBaseModel + + +class VersionIdResponse(UncheckedBaseModel): + """ + A reference to a specific Version by its ID + """ + + version: "VersionIdResponseVersion" = pydantic.Field() + """ + The specific Version being referenced. 
+ """ + + type: typing.Literal["version"] = "version" + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001 +from .agent_response import AgentResponse # noqa: E402, F401, I001 +from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001 +from .flow_response import FlowResponse # noqa: E402, F401, I001 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001 +from .prompt_response import PromptResponse # noqa: E402, F401, I001 +from .tool_response import ToolResponse # noqa: E402, F401, I001 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001 +from .version_id_response_version import VersionIdResponseVersion # noqa: E402, F401, I001 + +update_forward_refs(VersionIdResponse) diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py new file mode 100644 index 00000000..eff8378c --- /dev/null +++ b/src/humanloop/types/version_id_response_version.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +from .dataset_response import DatasetResponse + +if typing.TYPE_CHECKING: + from .agent_response import AgentResponse + from .evaluator_response import EvaluatorResponse + from .flow_response import FlowResponse + from .prompt_response import PromptResponse + from .tool_response import ToolResponse +VersionIdResponseVersion = typing.Union[ + "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" +] diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py new file mode 100644 index 00000000..7785a8f1 --- /dev/null +++ b/src/humanloop/types/version_reference_response.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from .version_deployment_response import VersionDeploymentResponse + from .version_id_response import VersionIdResponse +VersionReferenceResponse = typing.Union["VersionDeploymentResponse", "VersionIdResponse"] diff --git a/src/humanloop/types/version_stats_response.py b/src/humanloop/types/version_stats_response.py new file mode 100644 index 00000000..be1c6286 --- /dev/null +++ b/src/humanloop/types/version_stats_response.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem + + +class VersionStatsResponse(UncheckedBaseModel): + version_id: str = pydantic.Field() + """ + Unique identifier for the evaluated Version. + """ + + batch_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the batch of Logs to include in the Evaluation. + """ + + num_logs: int = pydantic.Field() + """ + The total number of existing Logs in this Run. 
+ """ + + evaluator_version_stats: typing.List[VersionStatsResponseEvaluatorVersionStatsItem] = pydantic.Field() + """ + Stats for each Evaluator Version applied to this Run. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py b/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py new file mode 100644 index 00000000..a7b9fb21 --- /dev/null +++ b/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse +from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse +from .select_evaluator_stats_response import SelectEvaluatorStatsResponse +from .text_evaluator_stats_response import TextEvaluatorStatsResponse + +VersionStatsResponseEvaluatorVersionStatsItem = typing.Union[ + NumericEvaluatorStatsResponse, + BooleanEvaluatorStatsResponse, + SelectEvaluatorStatsResponse, + TextEvaluatorStatsResponse, +] diff --git a/src/humanloop/types/version_status.py b/src/humanloop/types/version_status.py new file mode 100644 index 00000000..fd31428e --- /dev/null +++ b/src/humanloop/types/version_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VersionStatus = typing.Union[typing.Literal["uncommitted", "committed", "deleted"], typing.Any] diff --git a/src/humanloop/version.py b/src/humanloop/version.py new file mode 100644 index 00000000..13ce3bc0 --- /dev/null +++ b/src/humanloop/version.py @@ -0,0 +1,3 @@ +from importlib import metadata + +__version__ = metadata.version("humanloop")