diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d60fae21f4..506bfad55f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.119.0" + ".": "1.120.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6413491acf..6d4a43adfc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## [1.120.0](https://github.com/googleapis/python-aiplatform/compare/v1.119.0...v1.120.0) (2025-10-08) + + +### Features + +* Add ability to use existing sessions for streaming_agent_run_with_events calls. ([0a369ea](https://github.com/googleapis/python-aiplatform/commit/0a369eacaa4e21e8055ef7c2e0f4c6da3a9298f8)) +* Add DeploymentTier enum to DeployedIndex ([89a26c1](https://github.com/googleapis/python-aiplatform/commit/89a26c15b8a15c8698192dc283e5839729ad3e66)) +* Add labels field for Predict API for Imagen use case (v1beta) ([89a26c1](https://github.com/googleapis/python-aiplatform/commit/89a26c15b8a15c8698192dc283e5839729ad3e66)) +* Auto-generated CL for //google/cloud/aiplatform:aiplatform_v1_public_proto_gen ([89a26c1](https://github.com/googleapis/python-aiplatform/commit/89a26c15b8a15c8698192dc283e5839729ad3e66)) +* Expose PSC for CustomModel ([d02099c](https://github.com/googleapis/python-aiplatform/commit/d02099cf200f7e372ad0c38f98a5b8c92bcc581c)) +* GenAI Client(evals) - Add `get_evaluation_set` and `get_evaluation_item` methods to Vertex AI GenAI SDK evals ([7757886](https://github.com/googleapis/python-aiplatform/commit/77578867ff7c4e8a9c4618481821cded32b4b135)) +* GenAI Client(evals) - Add `show` method for EvaluationRun class in Vertex AI GenAI SDK evals ([0c932b9](https://github.com/googleapis/python-aiplatform/commit/0c932b99bafde099734cf136828583de23fbaeb6)) +* Migrate vertex ai session service to use agent engine sdk ([b72df1c](https://github.com/googleapis/python-aiplatform/commit/b72df1c3e418c4b87ecb92e1506e7f3fe8da3994)) +* Migrate vertex_ai_session_service to use Agent Engine SDK ([55b54a2](https://github.com/googleapis/python-aiplatform/commit/55b54a21005587338abdd66c7605f534294b0c94)) +* Migrate vertex_ai_session_service to use Agent Engine SDK ([03772e2](https://github.com/googleapis/python-aiplatform/commit/03772e2a87f2fc6c66a69220e004522cc6ba1f94)) + + +### Bug Fixes + +* GenAI Client(evals) - Parse user defined rubrics correctly to LLM metric ([64b0665](https://github.com/googleapis/python-aiplatform/commit/64b0665169b045ccfd16e537af2e45a4dd7977e0)) + + +### Documentation + +* Fix Sessions SDK docstrings ([0077bde](https://github.com/googleapis/python-aiplatform/commit/0077bdebe62d0363c1faacd5f6e5562086dff080)) + ## [1.119.0](https://github.com/googleapis/python-aiplatform/compare/v1.118.0...v1.119.0) (2025-10-03) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index.py b/google/cloud/aiplatform/matching_engine/matching_engine_index.py index 209458dcd9..b5a521d1e1 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index.py @@ -588,6 +588,7 @@ def create_tree_ah_index( SHARD_SIZE_SMALL SHARD_SIZE_MEDIUM SHARD_SIZE_LARGE + SHARD_SIZE_SO_DYNAMIC Returns: @@ -740,6 +741,7 @@ def create_brute_force_index( SHARD_SIZE_SMALL SHARD_SIZE_MEDIUM SHARD_SIZE_LARGE + SHARD_SIZE_SO_DYNAMIC Returns: MatchingEngineIndex - Index resource object diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py index d81c3e60d6..c5694a4517 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py @@ -944,6 +944,7 @@ def _build_deployed_index( auth_config_audiences: Optional[Sequence[str]] = None, auth_config_allowed_issuers: Optional[Sequence[str]] = None, psc_automation_configs: Optional[Sequence[Tuple[str, str]]] = None, + deployment_tier: Optional[str] = None, ) -> gca_matching_engine_index_endpoint.DeployedIndex: """Builds a DeployedIndex. @@ -1046,6 +1047,8 @@ def _build_deployed_index( projects/{project}/global/networks/{network}, where {project} is a project number, as in '12345', and {network} is network name. + deployment_tier (str): + Optional. The deployment tier that the index is deployed to. """ @@ -1056,6 +1059,7 @@ def _build_deployed_index( enable_access_logging=enable_access_logging, reserved_ip_ranges=reserved_ip_ranges, deployment_group=deployment_group, + deployment_tier=deployment_tier, ) if auth_config_audiences and auth_config_allowed_issuers: @@ -1115,6 +1119,7 @@ def deploy_index( sync: bool = True, deploy_request_timeout: Optional[float] = None, psc_automation_configs: Optional[Sequence[Tuple[str, str]]] = None, + deployment_tier: Optional[str] = None, ) -> "MatchingEngineIndexEndpoint": """Deploys an existing index resource to this endpoint resource. @@ -1231,6 +1236,8 @@ def deploy_index( [(project_id_1, network_1), (project_id_1, network_2))] will enable PSC automation for the index to be deployed to project_id_1's network_1 and network_2 and can be queried within these networks. + deployment_tier (str): + Optional. The deployment tier that the index is deployed to. Returns: MatchingEngineIndexEndpoint - IndexEndpoint resource object """ @@ -1250,6 +1257,7 @@ def deploy_index( sync=sync, deploy_request_timeout=deploy_request_timeout, psc_automation_configs=psc_automation_configs, + deployment_tier=deployment_tier, ) @base.optional_sync(return_input_arg="self") @@ -1270,6 +1278,7 @@ def _deploy_index( sync: bool = True, deploy_request_timeout: Optional[float] = None, psc_automation_configs: Optional[Sequence[Tuple[str, str]]] = None, + deployment_tier: Optional[str] = None, ) -> "MatchingEngineIndexEndpoint": """Helper method to deploy an existing index resource to this endpoint resource. @@ -1386,6 +1395,8 @@ def _deploy_index( [(project_id_1, network_1), (project_id_1, network_2))] will enable PSC automation for the index to be deployed to project_id_1's network_1 and network_2 and can be queried within these networks. + deployment_tier (str): + Optional. 
The deployment tier that the index is deployed to. Returns: MatchingEngineIndexEndpoint - IndexEndpoint resource object """ @@ -1411,6 +1422,7 @@ def _deploy_index( auth_config_audiences=auth_config_audiences, auth_config_allowed_issuers=auth_config_allowed_issuers, psc_automation_configs=psc_automation_configs, + deployment_tier=deployment_tier, ) deploy_lro = self.api_client.deploy_index( diff --git a/google/cloud/aiplatform/utils/pipeline_utils.py b/google/cloud/aiplatform/utils/pipeline_utils.py index 88323efe76..5791a54338 100644 --- a/google/cloud/aiplatform/utils/pipeline_utils.py +++ b/google/cloud/aiplatform/utils/pipeline_utils.py @@ -68,6 +68,10 @@ def __init__( self._input_artifacts = copy.deepcopy(input_artifacts or {}) self._failure_policy = failure_policy self._default_runtime = default_runtime + self._parsed_schema_version = packaging.version.parse(schema_version) + self._is_version_gt_2 = self._parsed_schema_version > packaging.version.parse( + "2.0.0" + ) @classmethod def from_job_spec_json( @@ -188,9 +192,7 @@ def build(self) -> Dict[str, Any]: "Pipeline root must be specified, either during " "compile time, or when calling the service." ) - if packaging.version.parse(self._schema_version) > packaging.version.parse( - "2.0.0" - ): + if self._is_version_gt_2: parameter_values_key = "parameterValues" else: parameter_values_key = "parameters" @@ -243,9 +245,7 @@ def _get_vertex_value( "pipeline job input definitions.".format(name) ) - if packaging.version.parse(self._schema_version) <= packaging.version.parse( - "2.0.0" - ): + if not self._is_version_gt_2: result = {} if self._parameter_types[name] == "INT": result["intValue"] = value diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
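For context on the new `deployment_tier` parameter threaded through `deploy_index` above, here is a minimal usage sketch. The project, location, and resource IDs are placeholders; the call shape mirrors the unit test added later in this diff.

```python
from google.cloud import aiplatform

aiplatform.init(project="my-project", location="us-central1")  # placeholders

index = aiplatform.MatchingEngineIndex(index_name="my-index-id")
endpoint = aiplatform.MatchingEngineIndexEndpoint(
    index_endpoint_name="my-index-endpoint-id"
)

# "STORAGE" is the cost-optimized tier introduced in this release; leaving
# deployment_tier unset keeps the system-chosen default tier.
endpoint.deploy_index(
    index=index,
    deployed_index_id="my_deployed_index",
    display_name="my deployed index",
    deployment_tier="STORAGE",
)
```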
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 20b3ea909a..6fe505dcba 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.119.0" +__version__ = "1.120.0" diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 4ec75fa0e3..586f742c72 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -265,40 +265,40 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py index 65e17a1e37..44c8fb5cb4 100644 --- a/google/cloud/aiplatform_v1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1/types/accelerator_type.py @@ -63,6 +63,8 @@ class AcceleratorType(proto.Enum): Nvidia B200 GPU. NVIDIA_GB200 (17): Nvidia GB200 GPU. + NVIDIA_RTX_PRO_6000 (18): + Nvidia RTX Pro 6000 GPU. TPU_V2 (6): TPU v2. TPU_V3 (7): @@ -87,6 +89,7 @@ class AcceleratorType(proto.Enum): NVIDIA_H200_141GB = 15 NVIDIA_B200 = 16 NVIDIA_GB200 = 17 + NVIDIA_RTX_PRO_6000 = 18 TPU_V2 = 6 TPU_V3 = 7 TPU_V4_POD = 10 diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index f791cb5484..bfbea76141 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -342,6 +342,10 @@ class DeployedIndex(proto.Message): Note: we only support up to 5 deployment groups(not including 'default'). + deployment_tier (google.cloud.aiplatform_v1.types.DeployedIndex.DeploymentTier): + Optional. The deployment tier that the index is deployed to. + DEPLOYMENT_TIER_UNSPECIFIED will use a system-chosen default + tier. psc_automation_configs (MutableSequence[google.cloud.aiplatform_v1.types.PSCAutomationConfig]): Optional. If set for PSC deployed index, PSC connection will be automatically created after deployment is done and the @@ -349,6 +353,20 @@ class DeployedIndex(proto.Message): private_endpoints.psc_automated_endpoints. """ + class DeploymentTier(proto.Enum): + r"""Tiers encapsulate serving time attributes like latency and + throughput. + + Values: + DEPLOYMENT_TIER_UNSPECIFIED (0): + Default deployment tier. 
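At the GAPIC layer, the same tier is exposed as the `DeployedIndex.deployment_tier` enum field added below. A hedged sketch of constructing the proto directly, with a placeholder index resource name:

```python
from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint

deployed_index = gca_index_endpoint.DeployedIndex(
    id="my_deployed_index",
    index="projects/my-project/locations/us-central1/indexes/my-index-id",
    # STORAGE (enum value 2) is the cost-optimized tier; the unspecified
    # value lets the service pick a default tier.
    deployment_tier=gca_index_endpoint.DeployedIndex.DeploymentTier.STORAGE,
)
```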
+ STORAGE (2): + Optimized for costs. + """ + + DEPLOYMENT_TIER_UNSPECIFIED = 0 + STORAGE = 2 + id: str = proto.Field( proto.STRING, number=1, @@ -407,6 +425,11 @@ class DeployedIndex(proto.Message): proto.STRING, number=11, ) + deployment_tier: DeploymentTier = proto.Field( + proto.ENUM, + number=18, + enum=DeploymentTier, + ) psc_automation_configs: MutableSequence[service_networking.PSCAutomationConfig] = ( proto.RepeatedField( proto.MESSAGE, diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py index 35497bcddd..a07018da3f 100644 --- a/google/cloud/aiplatform_v1/types/tool.py +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -106,21 +106,73 @@ class Tool(proto.Message): specific Function Declarations. """ + class PhishBlockThreshold(proto.Enum): + r"""These are available confidence level user can set to block + malicious urls with chosen confidence and above. For + understanding different confidence of webrisk, please refer to + https://cloud.google.com/web-risk/docs/reference/rpc/google.cloud.webrisk.v1eap1#confidencelevel + + Values: + PHISH_BLOCK_THRESHOLD_UNSPECIFIED (0): + Defaults to unspecified. + BLOCK_LOW_AND_ABOVE (30): + Blocks Low and above confidence URL that is + risky. + BLOCK_MEDIUM_AND_ABOVE (40): + Blocks Medium and above confidence URL that + is risky. + BLOCK_HIGH_AND_ABOVE (50): + Blocks High and above confidence URL that is + risky. + BLOCK_HIGHER_AND_ABOVE (55): + Blocks Higher and above confidence URL that + is risky. + BLOCK_VERY_HIGH_AND_ABOVE (60): + Blocks Very high and above confidence URL + that is risky. + BLOCK_ONLY_EXTREMELY_HIGH (100): + Blocks Extremely high confidence URL that is + risky. + """ + + PHISH_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 30 + BLOCK_MEDIUM_AND_ABOVE = 40 + BLOCK_HIGH_AND_ABOVE = 50 + BLOCK_HIGHER_AND_ABOVE = 55 + BLOCK_VERY_HIGH_AND_ABOVE = 60 + BLOCK_ONLY_EXTREMELY_HIGH = 100 + class GoogleSearch(proto.Message): r"""GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: exclude_domains (MutableSequence[str]): Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. + blocking_confidence (google.cloud.aiplatform_v1.types.Tool.PhishBlockThreshold): + Optional. Sites with confidence level chosen + & above this value will be blocked from the + search results. + + This field is a member of `oneof`_ ``_blocking_confidence``. """ exclude_domains: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) + blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum="Tool.PhishBlockThreshold", + ) class CodeExecution(proto.Message): r"""Tool that executes code generated by the model, and automatically @@ -692,17 +744,32 @@ class EnterpriseWebSearch(proto.Message): r"""Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: exclude_domains (MutableSequence[str]): Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. + blocking_confidence (google.cloud.aiplatform_v1.types.Tool.PhishBlockThreshold): + Optional. 
Sites with confidence level chosen + & above this value will be blocked from the + search results. + + This field is a member of `oneof`_ ``_blocking_confidence``. """ exclude_domains: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) + blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field( + proto.ENUM, + number=2, + optional=True, + enum="Tool.PhishBlockThreshold", + ) class DynamicRetrievalConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 0b00060299..97d74b86b4 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -243,40 +243,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py index ef459f6c45..62e6dec05d 100644 --- a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py @@ -63,6 +63,8 @@ class AcceleratorType(proto.Enum): Nvidia B200 GPU. NVIDIA_GB200 (17): Nvidia GB200 GPU. + NVIDIA_RTX_PRO_6000 (18): + Nvidia RTX Pro 6000 GPU. TPU_V2 (6): TPU v2. 
TPU_V3 (7): @@ -87,6 +89,7 @@ class AcceleratorType(proto.Enum): NVIDIA_H200_141GB = 15 NVIDIA_B200 = 16 NVIDIA_GB200 = 17 + NVIDIA_RTX_PRO_6000 = 18 TPU_V2 = 6 TPU_V3 = 7 TPU_V4_POD = 10 diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 7d9d7a93bf..5bce4ddc9c 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -89,6 +89,10 @@ class PredictRequest(proto.Message): ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + labels (MutableMapping[str, str]): + Optional. The user labels for Imagen billing + usage only. Only Imagen supports labels. For + other use cases, it will be ignored. """ endpoint: str = proto.Field( @@ -105,6 +109,11 @@ class PredictRequest(proto.Message): number=3, message=struct_pb2.Value, ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) class PredictResponse(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index e6d1f8babb..82be2fc989 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -107,21 +107,73 @@ class Tool(proto.Message): specific Function Declarations. """ + class PhishBlockThreshold(proto.Enum): + r"""These are available confidence level user can set to block + malicious urls with chosen confidence and above. For + understanding different confidence of webrisk, please refer to + https://cloud.google.com/web-risk/docs/reference/rpc/google.cloud.webrisk.v1eap1#confidencelevel + + Values: + PHISH_BLOCK_THRESHOLD_UNSPECIFIED (0): + Defaults to unspecified. + BLOCK_LOW_AND_ABOVE (30): + Blocks Low and above confidence URL that is + risky. + BLOCK_MEDIUM_AND_ABOVE (40): + Blocks Medium and above confidence URL that + is risky. + BLOCK_HIGH_AND_ABOVE (50): + Blocks High and above confidence URL that is + risky. + BLOCK_HIGHER_AND_ABOVE (55): + Blocks Higher and above confidence URL that + is risky. + BLOCK_VERY_HIGH_AND_ABOVE (60): + Blocks Very high and above confidence URL + that is risky. + BLOCK_ONLY_EXTREMELY_HIGH (100): + Blocks Extremely high confidence URL that is + risky. + """ + + PHISH_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 30 + BLOCK_MEDIUM_AND_ABOVE = 40 + BLOCK_HIGH_AND_ABOVE = 50 + BLOCK_HIGHER_AND_ABOVE = 55 + BLOCK_VERY_HIGH_AND_ABOVE = 60 + BLOCK_ONLY_EXTREMELY_HIGH = 100 + class GoogleSearch(proto.Message): r"""GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: exclude_domains (MutableSequence[str]): Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. + blocking_confidence (google.cloud.aiplatform_v1beta1.types.Tool.PhishBlockThreshold): + Optional. Sites with confidence level chosen + & above this value will be blocked from the + search results. + + This field is a member of `oneof`_ ``_blocking_confidence``. 
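Three of the v1beta1 surface changes above are directly user-settable: the new `NVIDIA_RTX_PRO_6000` accelerator, the Imagen-only `labels` map on `PredictRequest`, and the `blocking_confidence` phishing threshold on the search tools. A hedged sketch of each, using placeholder resource names (the machine-type pairing for the new GPU is an assumption):

```python
from google.cloud import aiplatform_v1beta1 as aip_beta

# New accelerator enum value.
machine_spec = aip_beta.types.MachineSpec(
    accelerator_type=aip_beta.types.AcceleratorType.NVIDIA_RTX_PRO_6000,
    accelerator_count=1,
)

# Imagen-only billing labels on PredictRequest; ignored for other use cases.
request = aip_beta.types.PredictRequest(
    endpoint="projects/my-project/locations/us-central1/endpoints/my-endpoint",
    labels={"team": "imagen-experiments"},
)

# Phishing block threshold on the Google Search tool (an optional oneof field);
# lower thresholds block more aggressively.
search_tool = aip_beta.types.Tool(
    google_search=aip_beta.types.Tool.GoogleSearch(
        exclude_domains=["example.com"],
        blocking_confidence=(
            aip_beta.types.Tool.PhishBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
        ),
    )
)
```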
""" exclude_domains: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) + blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum="Tool.PhishBlockThreshold", + ) class CodeExecution(proto.Message): r"""Tool that executes code generated by the model, and automatically @@ -816,17 +868,32 @@ class EnterpriseWebSearch(proto.Message): r"""Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: exclude_domains (MutableSequence[str]): Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. + blocking_confidence (google.cloud.aiplatform_v1beta1.types.Tool.PhishBlockThreshold): + Optional. Sites with confidence level chosen + & above this value will be blocked from the + search results. + + This field is a member of `oneof`_ ``_blocking_confidence``. """ exclude_domains: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) + blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum="Tool.PhishBlockThreshold", + ) class DynamicRetrievalConfig(proto.Message): diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index 442134d161..40bea58715 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.119.0" +__version__ = "1.120.0" diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 6e0024dc79..0519cfa693 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.119.0" + "version": "1.120.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index c49361009f..a8c22e4822 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.119.0" + "version": "1.120.0" }, "snippets": [ { diff --git a/tests/unit/aiplatform/test_matching_engine_index.py b/tests/unit/aiplatform/test_matching_engine_index.py index 42849b936c..1a66d3be71 100644 --- a/tests/unit/aiplatform/test_matching_engine_index.py +++ b/tests/unit/aiplatform/test_matching_engine_index.py @@ -77,7 +77,12 @@ _TEST_INDEX_APPROXIMATE_NEIGHBORS_COUNT = 150 _TEST_LEAF_NODE_EMBEDDING_COUNT = 123 _TEST_LEAF_NODES_TO_SEARCH_PERCENT = 50 -_TEST_SHARD_SIZES = ["SHARD_SIZE_SMALL", "SHARD_SIZE_LARGE", "SHARD_SIZE_MEDIUM"] +_TEST_SHARD_SIZES = [ + "SHARD_SIZE_SMALL", + "SHARD_SIZE_LARGE", + "SHARD_SIZE_MEDIUM", + "SHARD_SIZE_SO_DYNAMIC", +] _TEST_INDEX_DESCRIPTION = test_constants.MatchingEngineConstants._TEST_INDEX_DESCRIPTION diff --git a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py index 61db76ba18..a8009e099e 100644 --- 
a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py +++ b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py @@ -103,6 +103,7 @@ "service-account-name-1@project-id.iam.gserviceaccount.com", "service-account-name-2@project-id.iam.gserviceaccount.com", ] +_TEST_DEPLOYMENT_TIER = "STORAGE" _TEST_SIGNED_JWT = "signed_jwt" _TEST_AUTHORIZATION_METADATA = (("authorization", f"Bearer: {_TEST_SIGNED_JWT}"),) @@ -1324,6 +1325,47 @@ def test_deploy_index_psc_automation_configs(self, deploy_index_mock, sync): timeout=_TEST_TIMEOUT, ) + @pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_deploy_index_deployment_tier(self, deploy_index_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint( + index_endpoint_name=_TEST_INDEX_ENDPOINT_ID + ) + + # Get index + my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_NAME) + + my_index_endpoint = my_index_endpoint.deploy_index( + index=my_index, + deployed_index_id=_TEST_DEPLOYED_INDEX_ID, + display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME, + deployment_tier=_TEST_DEPLOYMENT_TIER, + request_metadata=_TEST_REQUEST_METADATA, + sync=sync, + deploy_request_timeout=_TEST_TIMEOUT, + ) + + if not sync: + my_index_endpoint.wait() + + deploy_index_mock.assert_called_once_with( + index_endpoint=my_index_endpoint.resource_name, + deployed_index=gca_index_endpoint.DeployedIndex( + id=_TEST_DEPLOYED_INDEX_ID, + index=my_index.resource_name, + display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME, + automatic_resources={ + "min_replica_count": None, + "max_replica_count": None, + }, + deployment_tier=_TEST_DEPLOYMENT_TIER, + ), + metadata=_TEST_REQUEST_METADATA, + timeout=_TEST_TIMEOUT, + ) + @pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock") def test_mutate_deployed_index(self, mutate_deployed_index_mock): aiplatform.init(project=_TEST_PROJECT) diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py index dd85130663..5a0da4a120 100644 --- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py @@ -4717,7 +4717,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -4727,7 +4728,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -5222,7 +5224,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -5232,7 +5235,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -6589,7 +6593,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + 
"blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -6599,7 +6604,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -7130,7 +7136,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -7140,7 +7147,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index 41f0436ffe..79535ab5a3 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -6502,6 +6502,7 @@ def test_create_index_endpoint_rest_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -7087,6 +7088,7 @@ def test_update_index_endpoint_rest_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -7780,6 +7782,7 @@ def test_mutate_deployed_index_rest_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -8889,6 +8892,7 @@ async def test_create_index_endpoint_rest_asyncio_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -9523,6 +9527,7 @@ async def test_update_index_endpoint_rest_asyncio_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -10282,6 +10287,7 @@ async def test_mutate_deployed_index_rest_asyncio_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index bbff4dca10..1ba265257f 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -5427,19 +5427,22 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = 
MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -5449,22 +5452,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py index 262cd43467..784e4d8427 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -4728,7 +4728,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -4738,7 +4739,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -5244,7 +5246,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -5254,7 +5257,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -6622,7 +6626,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -6632,7 +6637,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -7174,7 +7180,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -7184,7 +7191,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + 
"blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index af4ae4e5a0..d2575b79e9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -5403,19 +5403,22 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -5425,22 +5428,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "squid" + dataset = "clam" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", + "project": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/vertexai/genai/replays/conftest.py b/tests/unit/vertexai/genai/replays/conftest.py index f1c963a3ca..0cd57473a0 100644 --- a/tests/unit/vertexai/genai/replays/conftest.py +++ b/tests/unit/vertexai/genai/replays/conftest.py @@ -131,29 +131,39 @@ def _get_replay_id(use_vertex: bool, replays_prefix: str) -> str: EVAL_CONFIG_GCS_URI = ( "gs://vertex-ai-generative-ai-eval-sdk-resources/metrics/text_quality/v1.0.0.yaml" ) +EVAL_ITEM_REQUEST_GCS_URI = ( + "gs://lakeyk-limited-bucket/agora_eval_080525/request_4813679498589372416.json" +) +EVAL_ITEM_RESULT_GCS_URI = ( + "gs://lakeyk-limited-bucket/agora_eval_080525/result_1486082323915997184.json" +) +EVAL_GCS_URI_ITEMS = { + EVAL_CONFIG_GCS_URI: "test_resources/mock_eval_config.yaml", + EVAL_ITEM_REQUEST_GCS_URI: "test_resources/request_4813679498589372416.json", + EVAL_ITEM_RESULT_GCS_URI: "test_resources/result_1486082323915997184.json", +} def _mock_read_file_contents_side_effect(uri: str): """ Side effect to mock GcsUtils.read_file_contents for eval test test_batch_evaluate. """ - if uri == EVAL_CONFIG_GCS_URI: + if uri in EVAL_GCS_URI_ITEMS: # Construct the absolute path to the local mock file. current_dir = os.path.dirname(__file__) - local_yaml_path = os.path.join( - current_dir, "test_resources/mock_eval_config.yaml" - ) + local_mock_file_path = os.path.join(current_dir, EVAL_GCS_URI_ITEMS[uri]) try: - with open(local_yaml_path, "r") as f: + with open(local_mock_file_path, "r") as f: return f.read() except FileNotFoundError: raise FileNotFoundError( - "The mock data file 'mock_eval_config.yaml' was not found." 
+ f"The mock data file '{EVAL_GCS_URI_ITEMS[uri]}' was not found." ) raise ValueError( f"Unexpected GCS URI '{uri}' in replay test. Only " - f"'{EVAL_CONFIG_GCS_URI}' is mocked." + f"'{EVAL_CONFIG_GCS_URI}', '{EVAL_ITEM_REQUEST_GCS_URI}', and " + f"'{EVAL_ITEM_RESULT_GCS_URI}' are mocked." ) diff --git a/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py b/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py index d4f59f25ba..ea83bdeb2f 100644 --- a/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py +++ b/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py @@ -14,6 +14,7 @@ # # pylint: disable=protected-access,bad-continuation,missing-function-docstring +import logging import pytest @@ -21,10 +22,13 @@ from vertexai._genai import types -def test_agent_engine_delete(client): +def test_agent_engine_delete(client, caplog): + caplog.set_level(logging.INFO) agent_engine = client.agent_engines.create() operation = client.agent_engines.delete(name=agent_engine.api_resource.name) assert isinstance(operation, types.DeleteAgentEngineOperation) + assert "Deleting AgentEngine resource" in caplog.text + assert f"Started AgentEngine delete operation: {operation.name}" in caplog.text pytestmark = pytest_helper.setup( @@ -38,10 +42,13 @@ def test_agent_engine_delete(client): @pytest.mark.asyncio -async def test_agent_engine_delete_async(client): +async def test_agent_engine_delete_async(client, caplog): + caplog.set_level(logging.INFO) # TODO(b/431785750): use async methods for create() when available agent_engine = client.agent_engines.create() operation = await client.aio.agent_engines.delete( name=agent_engine.api_resource.name ) assert isinstance(operation, types.DeleteAgentEngineOperation) + assert "Deleting AgentEngine resource" in caplog.text + assert f"Started AgentEngine delete operation: {operation.name}" in caplog.text diff --git a/tests/unit/vertexai/genai/replays/test_execute_code_agent_engine_sandbox.py b/tests/unit/vertexai/genai/replays/test_execute_code_agent_engine_sandbox.py index ab51fe4904..e1f20ef3cd 100644 --- a/tests/unit/vertexai/genai/replays/test_execute_code_agent_engine_sandbox.py +++ b/tests/unit/vertexai/genai/replays/test_execute_code_agent_engine_sandbox.py @@ -33,15 +33,30 @@ def test_execute_code_sandbox(client): config=types.CreateAgentEngineSandboxConfig(display_name="test_sandbox"), ) assert isinstance(operation, types.AgentEngineSandboxOperation) + + code = """ +with open("test.txt", "r") as input: + with open("output.txt", "w") as output_txt: + for line in input: + output_txt.write(line) +""" input_data = { - "language": "python", - "code": 'with open("hello.txt","w") as file:\n file.write("Hello, world!")', + "code": code, + "files": [ + { + "name": "test.txt", + "mimeType": "text/plain", + "content": b"Hello, world!", + } + ], } response = client.agent_engines.sandboxes.execute_code( name=operation.response.name, input_data=input_data, ) assert response.outputs[0].mime_type == "application/json" + assert response.outputs[1].data == b"Hello, world!" 
+ assert response.outputs[1].metadata.attributes.get("file_name") == b"output.txt" pytestmark = pytest_helper.setup( diff --git a/tests/unit/vertexai/genai/replays/test_generate_agent_engine_memories.py b/tests/unit/vertexai/genai/replays/test_generate_agent_engine_memories.py index 1ddc70debd..8b2c67cab2 100644 --- a/tests/unit/vertexai/genai/replays/test_generate_agent_engine_memories.py +++ b/tests/unit/vertexai/genai/replays/test_generate_agent_engine_memories.py @@ -19,14 +19,19 @@ from google.genai import types as genai_types -def test_generate_memories(client): +def test_generate_and_rollback_memories(client): + client._api_client._http_options.base_url = ( + "https://us-central1-autopush-aiplatform.sandbox.googleapis.com/" + ) agent_engine = client.agent_engines.create() assert not list( - client.agent_engines.list_memories( + client.agent_engines.memories.list( name=agent_engine.api_resource.name, ) ) - client.agent_engines.generate_memories( + # Generate memories using source content. This result is non-deterministic, + # because an LLM is used to generate the memories. + client.agent_engines.memories.generate( name=agent_engine.api_resource.name, scope={"user_id": "test-user-id"}, direct_contents_source=types.GenerateMemoriesRequestDirectContentsSource( @@ -43,23 +48,72 @@ def test_generate_memories(client): ) ] ), + config=types.GenerateAgentEngineMemoriesConfig( + revision_labels={"key": "value"} + ), ) - assert ( - len( - list( - client.agent_engines.list_memories( - name=agent_engine.api_resource.name, - ) - ) + memories = list( + client.agent_engines.memories.list( + name=agent_engine.api_resource.name, ) - >= 1 ) + assert len(memories) >= 1 + + # Every action that modifies a memory creates a new revision. + memory_revisions = list( + client.agent_engines.memories.revisions.list( + name=memories[0].name, + ) + ) + assert len(memory_revisions) >= 1 + # The revision's labels depend on the generation request's revision labels. + assert memory_revisions[0].labels == {"key": "value"} + revision_name = memory_revisions[0].name + + # Update the memory. + client.agent_engines.memories._update( + name=memories[0].name, + fact="This is temporary", + scope={"user_id": "test-user-id"}, + ) + memory = client.agent_engines.memories.get(name=memories[0].name) + assert memory.fact == "This is temporary" + + # Rollback to the revision with the original fact that was created by the + # generation request. + client.agent_engines.memories.rollback( + name=memories[0].name, + target_revision_id=revision_name.split("/")[-1], + ) + memory = client.agent_engines.memories.get(name=memories[0].name) + assert memory.fact == memory_revisions[0].fact + + # Update the memory again using generation. We use the original source + # content to ensure that the original memory is updated. The response should + # refer to the previous revision. + response = client.agent_engines.memories.generate( + name=agent_engine.api_resource.name, + scope={"user_id": "test-user-id"}, + direct_contents_source=types.GenerateMemoriesRequestDirectContentsSource( + events=[ + types.GenerateMemoriesRequestDirectContentsSourceEvent( + content=genai_types.Content( + role="model", + parts=[genai_types.Part(text=memory_revisions[0].fact)], + ) + ) + ] + ), + ) + # The memory was updated, so the previous revision is set. 
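The memories test above also documents the renamed surface: `agent_engines.list_memories` and `agent_engines.generate_memories` become `agent_engines.memories.list` and `.generate`, with new `revisions` and `rollback` helpers. A condensed sketch of the rollback flow, assuming an existing Agent Engine with at least one memory:

```python
engine_name = agent_engine.api_resource.name  # an existing Agent Engine

memories = list(client.agent_engines.memories.list(name=engine_name))
revisions = list(
    client.agent_engines.memories.revisions.list(name=memories[0].name)
)

# Restore the memory to an earlier revision, addressed by its trailing ID.
client.agent_engines.memories.rollback(
    name=memories[0].name,
    target_revision_id=revisions[0].name.split("/")[-1],
)
```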
+ assert response.response.generated_memories[0].previous_revision is not None + client.agent_engines.delete(name=agent_engine.api_resource.name, force=True) def test_generate_memories_direct_memories_source(client): agent_engine = client.agent_engines.create() - client.agent_engines.generate_memories( + client.agent_engines.memories.generate( name=agent_engine.api_resource.name, scope={"user_id": "test-user-id"}, direct_memories_source=types.GenerateMemoriesRequestDirectMemoriesSource( @@ -77,7 +131,7 @@ def test_generate_memories_direct_memories_source(client): assert ( len( list( - client.agent_engines.list_memories( + client.agent_engines.memories.list( name=agent_engine.api_resource.name, ) ) diff --git a/tests/unit/vertexai/genai/replays/test_get_evaluation_item.py b/tests/unit/vertexai/genai/replays/test_get_evaluation_item.py new file mode 100644 index 0000000000..687f5a3a16 --- /dev/null +++ b/tests/unit/vertexai/genai/replays/test_get_evaluation_item.py @@ -0,0 +1,146 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# pylint: disable=protected-access,bad-continuation,missing-function-docstring + +from tests.unit.vertexai.genai.replays import pytest_helper +from vertexai import types +import datetime +import pytest + + +def test_get_eval_item_response(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem.""" + evaluation_item_name = "projects/503583131166/locations/us-central1/evaluationItems/1486082323915997184" + evaluation_item = client.evals.get_evaluation_item(name=evaluation_item_name) + assert isinstance(evaluation_item, types.EvaluationItem) + check_item_1486082323915997184(evaluation_item, evaluation_item_name) + + +def test_get_eval_item_request(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem with request.""" + evaluation_item_name = "projects/503583131166/locations/us-central1/evaluationItems/4813679498589372416" + evaluation_item = client.evals.get_evaluation_item(name=evaluation_item_name) + assert isinstance(evaluation_item, types.EvaluationItem) + check_item_4813679498589372416(evaluation_item, evaluation_item_name) + + +pytest_plugins = ("pytest_asyncio",) + + +@pytest.mark.asyncio +async def test_get_eval_item_response_async(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem.""" + eval_item_id = "1486082323915997184" + evaluation_item_name = ( + f"projects/503583131166/locations/us-central1/evaluationItems/{eval_item_id}" + ) + evaluation_item = await client.aio.evals.get_evaluation_item(name=eval_item_id) + check_item_1486082323915997184(evaluation_item, evaluation_item_name) + + +@pytest.mark.asyncio +async def test_get_eval_item_request_async(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem with request.""" + eval_item_id = "4813679498589372416" + evaluation_item_name = ( + f"projects/503583131166/locations/us-central1/evaluationItems/{eval_item_id}" + ) + 
evaluation_item = await client.aio.evals.get_evaluation_item(name=eval_item_id) + check_item_4813679498589372416(evaluation_item, evaluation_item_name) + + +def check_item_1486082323915997184( + evaluation_item: types.EvaluationItem, evaluation_item_name: str +): + assert evaluation_item.name == evaluation_item_name + assert evaluation_item.display_name == "universal result for 7119522507803066368" + assert evaluation_item.evaluation_item_type == types.EvaluationItemType.RESULT + assert ( + evaluation_item.gcs_uri + == "gs://lakeyk-limited-bucket/agora_eval_080525/result_1486082323915997184.json" + ) + assert evaluation_item.create_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 713792, tzinfo=datetime.timezone.utc + ) + assert isinstance(evaluation_item.evaluation_response, types.EvaluationItemResult) + assert ( + evaluation_item.evaluation_response.evaluation_request + == "projects/503583131166/locations/us-central1/evaluationItems/7119522507803066368" + ) + assert ( + evaluation_item.evaluation_response.evaluation_run + == "projects/503583131166/locations/us-central1/evaluationRuns/1957799200510967808" + ) + # Check the first candidate result. + candidate_result = evaluation_item.evaluation_response.candidate_results[0] + assert candidate_result.candidate == "gemini-2.0-flash-001@default" + assert candidate_result.metric == "universal" + assert candidate_result.score == 0.2857143 + # Check the first rubric verdict. + rubric_verdict = candidate_result.rubric_verdicts[0] + assert rubric_verdict.verdict + assert ( + rubric_verdict.reasoning + == "The entire response is written in the English language." + ) + assert rubric_verdict.evaluated_rubric.type == "LANGUAGE:PRIMARY_RESPONSE_LANGUAGE" + assert rubric_verdict.evaluated_rubric.importance == "HIGH" + assert ( + rubric_verdict.evaluated_rubric.content.property.description + == "The response is in English." + ) + # Check the request. + request = evaluation_item.evaluation_response.request + assert ( + "There is a wide range of potato varieties to choose from" + in request.prompt.text + ) + assert request.candidate_responses[0].candidate == "gemini-2.0-flash-001@default" + assert "Pick out your potato variety" in request.candidate_responses[0].text + + +def check_item_4813679498589372416( + evaluation_item: types.EvaluationItem, evaluation_item_name: str +): + assert evaluation_item.name == evaluation_item_name + assert evaluation_item.display_name == "4813679498589372416" + assert evaluation_item.evaluation_item_type == types.EvaluationItemType.REQUEST + assert ( + evaluation_item.gcs_uri + == "gs://lakeyk-limited-bucket/agora_eval_080525/request_4813679498589372416.json" + ) + assert evaluation_item.create_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 338353, tzinfo=datetime.timezone.utc + ) + assert isinstance(evaluation_item.evaluation_request, types.EvaluationItemRequest) + # Check the request. + request = evaluation_item.evaluation_request + assert ( + "If your ball is curving during flight from left to right" + in request.prompt.text + ) + # Check the first candidate response. 
+ assert request.candidate_responses[0].candidate == "gemini-2.0-flash-001@default" + assert ( + "Keep your knees bent during the backswing" + in request.candidate_responses[0].text + ) + + +pytestmark = pytest_helper.setup( + file=__file__, + globals_for_file=globals(), + test_method="evals.get_evaluation_item", +) diff --git a/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py b/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py index e4e28af4f0..d28cca7a3e 100644 --- a/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py +++ b/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py @@ -16,6 +16,7 @@ from tests.unit.vertexai.genai.replays import pytest_helper from vertexai import types +from vertexai._genai import _evals_visualization import datetime import pytest @@ -188,6 +189,38 @@ def check_run_1957799200510967808( ) ) assert evaluation_run.error is None + eval_result = _evals_visualization._get_eval_result_from_eval_run( + evaluation_run.evaluation_results + ) + assert isinstance(eval_result, types.EvaluationResult) + assert eval_result.summary_metrics == [ + types.AggregatedMetricResult( + metric_name="checkpoint_1/universal", + mean_score=0.986633250587865, + stdev_score=0.0393092386127714, + ), + types.AggregatedMetricResult( + metric_name="checkpoint_2/universal", + mean_score=0.9438178790243048, + stdev_score=0.07597187617837561, + ), + types.AggregatedMetricResult( + metric_name="gemini-2.0-flash-001@default/universal", + mean_score=0.6943817985685249, + stdev_score=0.17738341388587855, + ), + types.AggregatedMetricResult( + metric_name="checkpoint_1/user_defined", mean_score=5, stdev_score=0 + ), + types.AggregatedMetricResult( + metric_name="checkpoint_2/user_defined", mean_score=5, stdev_score=0 + ), + types.AggregatedMetricResult( + metric_name="gemini-2.0-flash-001@default/user_defined", + mean_score=4.736842105263158, + stdev_score=0.6359497880839245, + ), + ] pytestmark = pytest_helper.setup( diff --git a/tests/unit/vertexai/genai/replays/test_get_evaluation_set.py b/tests/unit/vertexai/genai/replays/test_get_evaluation_set.py new file mode 100644 index 0000000000..37b99f1486 --- /dev/null +++ b/tests/unit/vertexai/genai/replays/test_get_evaluation_set.py @@ -0,0 +1,89 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# pylint: disable=protected-access,bad-continuation,missing-function-docstring + +from tests.unit.vertexai.genai.replays import pytest_helper +from vertexai import types +import datetime +import pytest + + +def test_get_eval_set(client): + """Tests that get_evaluation_set() returns a correctly structured EvaluationSet.""" + evaluation_set_name = ( + "projects/503583131166/locations/us-central1/evaluationSets/102386522778501120" + ) + evaluation_set = client.evals.get_evaluation_set(name=evaluation_set_name) + assert isinstance(evaluation_set, types.EvaluationSet) + check_set_102386522778501120(evaluation_set, evaluation_set_name) + + +pytest_plugins = ("pytest_asyncio",) + + +@pytest.mark.asyncio +async def test_get_eval_set_async(client): + """Tests that get_evaluation_set() returns a correctly structured EvaluationSet.""" + eval_set_id = "102386522778501120" + evaluation_set_name = ( + f"projects/503583131166/locations/us-central1/evaluationSets/{eval_set_id}" + ) + evaluation_set = await client.aio.evals.get_evaluation_set(name=eval_set_id) + check_set_102386522778501120(evaluation_set, evaluation_set_name) + + +def check_set_102386522778501120( + evaluation_set: types.EvaluationSet, evaluation_set_name: str +): + assert evaluation_set.name == evaluation_set_name + assert ( + evaluation_set.display_name + == "Results Set for EvaluationRun 1957799200510967808" + ) + assert evaluation_set.evaluation_items == [ + "projects/503583131166/locations/us-central1/evaluationItems/2748216119486578688", + "projects/503583131166/locations/us-central1/evaluationItems/1486082323915997184", + "projects/503583131166/locations/us-central1/evaluationItems/2219043163270545408", + "projects/503583131166/locations/us-central1/evaluationItems/8570244537769787392", + "projects/503583131166/locations/us-central1/evaluationItems/2112082672120496128", + "projects/503583131166/locations/us-central1/evaluationItems/8192505119024087040", + "projects/503583131166/locations/us-central1/evaluationItems/1383625432393318400", + "projects/503583131166/locations/us-central1/evaluationItems/5832267070561058816", + "projects/503583131166/locations/us-central1/evaluationItems/1733991409653907456", + "projects/503583131166/locations/us-central1/evaluationItems/2549142942207967232", + "projects/503583131166/locations/us-central1/evaluationItems/8565740938142416896", + "projects/503583131166/locations/us-central1/evaluationItems/6069620844672319488", + "projects/503583131166/locations/us-central1/evaluationItems/7777822109585113088", + "projects/503583131166/locations/us-central1/evaluationItems/5656415578861076480", + "projects/503583131166/locations/us-central1/evaluationItems/5926842662735839232", + "projects/503583131166/locations/us-central1/evaluationItems/648623899457617920", + "projects/503583131166/locations/us-central1/evaluationItems/4349245787016790016", + "projects/503583131166/locations/us-central1/evaluationItems/1119038954285301760", + "projects/503583131166/locations/us-central1/evaluationItems/5741983971781115904", + ] + assert evaluation_set.create_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 413954, tzinfo=datetime.timezone.utc + ) + assert evaluation_set.update_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 413954, tzinfo=datetime.timezone.utc + ) + assert evaluation_set.metadata is None + + +pytestmark = pytest_helper.setup( + file=__file__, + globals_for_file=globals(), + test_method="evals.get_evaluation_set", +) diff --git 
a/tests/unit/vertexai/genai/replays/test_list_agent_engine_session_events.py b/tests/unit/vertexai/genai/replays/test_list_agent_engine_session_events.py index 35ed900a54..4aa5b7f283 100644 --- a/tests/unit/vertexai/genai/replays/test_list_agent_engine_session_events.py +++ b/tests/unit/vertexai/genai/replays/test_list_agent_engine_session_events.py @@ -15,6 +15,7 @@ # pylint: disable=protected-access,bad-continuation,missing-function-docstring import datetime +import pytest from tests.unit.vertexai.genai.replays import pytest_helper from vertexai._genai import types @@ -22,23 +23,23 @@ def test_list_session_events(client): agent_engine = client.agent_engines.create() - operation = client.agent_engines.create_session( + operation = client.agent_engines.sessions.create( name=agent_engine.api_resource.name, user_id="test-user-123", ) session = operation.response assert not list( - client.agent_engines.list_session_events( + client.agent_engines.sessions.events.list( name=session.name, ) ) - client.agent_engines.append_session_event( + client.agent_engines.sessions.events.append( name=session.name, author="test-user-123", invocation_id="test-invocation-id", timestamp=datetime.datetime.fromtimestamp(1234567890, tz=datetime.timezone.utc), ) - session_event_list = client.agent_engines.list_session_events( + session_event_list = client.agent_engines.sessions.events.list( name=session.name, ) assert len(session_event_list) == 1 @@ -48,5 +49,33 @@ def test_list_session_events(client): pytestmark = pytest_helper.setup( file=__file__, globals_for_file=globals(), - test_method="agent_engines.list_session_events", + test_method="agent_engines.sessions.events.list", ) + + +pytest_plugins = ("pytest_asyncio",) + + +@pytest.mark.asyncio +async def test_async_list_session_events(client): + agent_engine = client.agent_engines.create() + operation = await client.aio.agent_engines.sessions.create( + name=agent_engine.api_resource.name, + user_id="test-user-123", + ) + session = operation.response + pager = await client.aio.agent_engines.sessions.events.list(name=session.name) + assert not [item async for item in pager] + + await client.aio.agent_engines.sessions.events.append( + name=session.name, + author="test-user-123", + invocation_id="test-invocation-id", + timestamp=datetime.datetime.fromtimestamp(1234567890, tz=datetime.timezone.utc), + ) + pager = await client.aio.agent_engines.sessions.events.list(name=session.name) + session_event_list = [item async for item in pager] + assert len(session_event_list) == 1 + assert isinstance(session_event_list[0], types.SessionEvent) + + client.agent_engines.delete(name=agent_engine.api_resource.name, force=True) diff --git a/tests/unit/vertexai/genai/replays/test_list_agent_engine_sessions.py b/tests/unit/vertexai/genai/replays/test_list_agent_engine_sessions.py index 258115d492..1b296ce992 100644 --- a/tests/unit/vertexai/genai/replays/test_list_agent_engine_sessions.py +++ b/tests/unit/vertexai/genai/replays/test_list_agent_engine_sessions.py @@ -14,6 +14,9 @@ # # pylint: disable=protected-access,bad-continuation,missing-function-docstring +import pytest + + from tests.unit.vertexai.genai.replays import pytest_helper from vertexai._genai import types @@ -21,23 +24,49 @@ def test_list_sessions(client): agent_engine = client.agent_engines.create() assert not list( - client.agent_engines.list_sessions( + client.agent_engines.sessions.list( name=agent_engine.api_resource.name, ) ) - client.agent_engines.create_session( + client.agent_engines.sessions.create( 
name=agent_engine.api_resource.name, user_id="test-user-123", ) - session_list = client.agent_engines.list_sessions( + session_list = client.agent_engines.sessions.list( name=agent_engine.api_resource.name, ) assert len(session_list) == 1 assert isinstance(session_list[0], types.Session) + client.agent_engines.delete(name=agent_engine.api_resource.name, force=True) + pytestmark = pytest_helper.setup( file=__file__, globals_for_file=globals(), - test_method="agent_engines.list_sessions", + test_method="agent_engines.sessions.list", ) + +pytest_plugins = ("pytest_asyncio",) + + +@pytest.mark.asyncio +async def test_async_list_sessions(client): + agent_engine = client.agent_engines.create() + pager = await client.aio.agent_engines.sessions.list( + name=agent_engine.api_resource.name + ) + assert not [item async for item in pager] + + await client.aio.agent_engines.sessions.create( + name=agent_engine.api_resource.name, + user_id="test-user-123", + ) + pager = await client.aio.agent_engines.sessions.list( + name=agent_engine.api_resource.name, + ) + session_list = [item async for item in pager] + assert len(session_list) == 1 + assert isinstance(session_list[0], types.Session) + + client.agent_engines.delete(name=agent_engine.api_resource.name, force=True) diff --git a/tests/unit/vertexai/genai/test_agent_engines.py b/tests/unit/vertexai/genai/test_agent_engines.py index ea4df4bfdd..b06a09c7ba 100644 --- a/tests/unit/vertexai/genai/test_agent_engines.py +++ b/tests/unit/vertexai/genai/test_agent_engines.py @@ -2271,7 +2271,7 @@ def teardown_method(self): def test_delete_agent_engine(self): with mock.patch.object( - self.client.agent_engines._api_client, "async_request" + self.client.aio.agent_engines._api_client, "async_request" ) as request_mock: request_mock.return_value = genai_types.HttpResponse(body="") asyncio.run( @@ -2288,7 +2288,7 @@ def test_delete_agent_engine(self): def test_delete_agent_engine_force(self): with mock.patch.object( - self.client.agent_engines._api_client, "async_request" + self.client.aio.agent_engines._api_client, "async_request" ) as request_mock: request_mock.return_value = genai_types.HttpResponse(body="") asyncio.run( diff --git a/tests/unit/vertexai/genai/test_evals.py b/tests/unit/vertexai/genai/test_evals.py index 475f60913d..254fd068c3 100644 --- a/tests/unit/vertexai/genai/test_evals.py +++ b/tests/unit/vertexai/genai/test_evals.py @@ -2036,15 +2036,15 @@ def test_convert_simple_request_response(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( {"role": "user", "parts": [{"content": "Hello", "type": "text"}]} - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "Hi", "type": "text"}], } - ], + ), } ] result_dataset = self.converter.convert(raw_data) @@ -2068,19 +2068,21 @@ def test_convert_with_system_instruction(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( {"role": "user", "parts": [{"content": "Hello", "type": "text"}]} - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "Hi", "type": "text"}], } - ], - "system_instruction": { - "role": "user", - "parts": [{"content": "Be helpful", "type": "text"}], - }, + ), + "system_instruction": json.dumps( + { + "role": "user", + "parts": [{"content": "Be helpful", "type": "text"}], + } + ), } ] result_dataset = self.converter.convert(raw_data) @@ -2093,22 +2095,28 @@ def test_convert_with_conversation_history(self): raw_data = [ { "format": 
"observability", - "request": [ - {"role": "user", "parts": [{"content": "Hello", "type": "text"}]}, - {"role": "system", "parts": [{"content": "Hi", "type": "text"}]}, + "request": json.dumps( + {"role": "user", "parts": [{"content": "Hello", "type": "text"}]} + ) + + "\n" + + json.dumps( + {"role": "system", "parts": [{"content": "Hi", "type": "text"}]} + ) + + "\n" + + json.dumps( { "role": "user", "parts": [ {"content": "What's the meaning of life?", "type": "text"} ], - }, - ], - "response": [ + } + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "42.", "type": "text"}], } - ], + ), } ] @@ -2139,27 +2147,27 @@ def test_convert_multiple_request_response(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( {"role": "user", "parts": [{"content": "Hello", "type": "text"}]} - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "Hi", "type": "text"}], } - ], + ), }, { "format": "observability", - "request": [ + "request": json.dumps( {"role": "user", "parts": [{"content": "Goodbye", "type": "text"}]} - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "Bye", "type": "text"}], } - ], + ), }, ] result_dataset = self.converter.convert(raw_data) @@ -2187,7 +2195,7 @@ def test_convert_skips_unknown_part_type(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( { "role": "user", "parts": [ @@ -2196,13 +2204,13 @@ def test_convert_skips_unknown_part_type(self): {"content": "Hello", "type": "text"}, ], } - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "Hi", "type": "text"}], } - ], + ), } ] @@ -2217,12 +2225,12 @@ def test_convert_skips_missing_request(self): raw_data = [ { "format": "observability", - "response": [ + "response": json.dumps( { "role": "system", "parts": [{"content": "Hi", "type": "text"}], } - ], + ), } ] result_dataset = self.converter.convert(raw_data) @@ -2232,9 +2240,9 @@ def test_convert_skips_missing_response(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( {"role": "user", "parts": [{"content": "Hello", "type": "text"}]} - ], + ), } ] result_dataset = self.converter.convert(raw_data) @@ -2244,7 +2252,7 @@ def test_convert_tool_call_parts(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( { "role": "user", "parts": [ @@ -2256,8 +2264,8 @@ def test_convert_tool_call_parts(self): } ], } - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [ @@ -2268,7 +2276,7 @@ def test_convert_tool_call_parts(self): } ], } - ], + ), } ] result_dataset = self.converter.convert(raw_data) @@ -2297,10 +2305,10 @@ def test_convert_tool_call_parts(self): ) -class TestAgentMetadata: - """Unit tests for the AgentMetadata class.""" +class TestAgentInfo: + """Unit tests for the AgentInfo class.""" - def test_agent_metadata_creation(self): + def test_agent_info_creation(self): tool = genai_types.Tool( function_declarations=[ genai_types.FunctionDeclaration( @@ -2313,18 +2321,16 @@ def test_agent_metadata_creation(self): ) ] ) - agent_metadata = vertexai_genai_types.AgentMetadata( + agent_info = vertexai_genai_types.AgentInfo( name="agent1", instruction="instruction1", description="description1", tool_declarations=[tool], - sub_agent_names=["sub_agent1"], ) - assert agent_metadata.name == "agent1" - assert agent_metadata.instruction == "instruction1" - assert 
agent_metadata.description == "description1" - assert agent_metadata.tool_declarations == [tool] - assert agent_metadata.sub_agent_names == ["sub_agent1"] + assert agent_info.name == "agent1" + assert agent_info.instruction == "instruction1" + assert agent_info.description == "description1" + assert agent_info.tool_declarations == [tool] class TestEvent: @@ -2359,13 -2365,11 @@ def test_eval_case_with_agent_eval_fields(self): ) ] ) - agent_metadata = { - "agent1": vertexai_genai_types.AgentMetadata( - name="agent1", - instruction="instruction1", - tool_declarations=[tool], - ) - } + agent_info = vertexai_genai_types.AgentInfo( + name="agent1", + instruction="instruction1", + tool_declarations=[tool], + ) intermediate_events = [ vertexai_genai_types.Event( event_id="event1", @@ -2381,14 +2385,26 @@ response=genai_types.Content(parts=[genai_types.Part(text="Hi")]) ) ], - agent_metadata=agent_metadata, + agent_info=agent_info, intermediate_events=intermediate_events, ) - assert eval_case.agent_metadata == agent_metadata + assert eval_case.agent_info == agent_info assert eval_case.intermediate_events == intermediate_events +class TestSessionInput: + """Unit tests for the SessionInput class.""" + + def test_session_input_creation(self): + session_input = vertexai_genai_types.SessionInput( + user_id="user1", + state={"key": "value"}, + ) + assert session_input.user_id == "user1" + assert session_input.state == {"key": "value"} + + class TestMetric: """Unit tests for the Metric class.""" @@ -3425,15 +3441,15 @@ def test_auto_detect_observability_schema(self): raw_data = [ { "format": "observability", - "request": [ + "request": json.dumps( {"role": "user", "parts": [{"content": "Hello", "type": "text"}]} - ], - "response": [ + ), + "response": json.dumps( { "role": "system", "parts": [{"content": "Hi", "type": "text"}], } - ], + ), } ] assert ( diff --git a/vertexai/_genai/_agent_engines_utils.py b/vertexai/_genai/_agent_engines_utils.py index c9e2e37848..7a6fac2007 100644 --- a/vertexai/_genai/_agent_engines_utils.py +++ b/vertexai/_genai/_agent_engines_utils.py @@ -30,6 +30,7 @@ from typing import ( Any, AsyncIterator, + Awaitable, Callable, Coroutine, Dict, @@ -391,6 +392,41 @@ def __call__(self, *, operation_name: str, **kwargs) -> AgentEngineOperationUnion: pass +class GetAsyncOperationFunction(Protocol): + async def __call__( + self, *, operation_name: str, **kwargs + ) -> Awaitable[AgentEngineOperationUnion]: + pass + + +async def _await_async_operation( + *, + operation_name: str, + get_operation_fn: GetAsyncOperationFunction, + poll_interval_seconds: float = 10, +) -> Any: + """Waits for an Agent Engine operation to complete. + + Args: + operation_name (str): + Required. The name of the operation to poll. + get_operation_fn (GetAsyncOperationFunction): + Required. The async function used to fetch the operation; it is + called repeatedly until the operation is done. + poll_interval_seconds (float): + Optional. The number of seconds to wait between each poll. Defaults to 10. + + Returns: + The operation that has completed (i.e. `operation.done==True`).
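+ + Example (an illustrative sketch; `get_fn` stands in for any async operation getter and the operation name is a placeholder): + + operation = await _await_async_operation( + operation_name="operations/123", + get_operation_fn=get_fn, + poll_interval_seconds=1, + )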
+ """ + operation = await get_operation_fn(operation_name=operation_name) + while not operation.done: + await asyncio.sleep(poll_interval_seconds) + operation = await get_operation_fn(operation_name=operation.name) + + return operation + + def _await_operation( *, operation_name: str, diff --git a/vertexai/_genai/_evals_common.py b/vertexai/_genai/_evals_common.py index 7a998db1e2..8e51c02a40 100644 --- a/vertexai/_genai/_evals_common.py +++ b/vertexai/_genai/_evals_common.py @@ -975,3 +975,37 @@ def _execute_evaluation( "Evaluation results uploaded successfully to GCS: %s", uploaded_path ) return evaluation_result + + +def _convert_gcs_to_evaluation_item_result( + api_client: BaseApiClient, + gcs_uri: str, +) -> types.EvaluationItemResult: + """Converts a json file to an EvaluationItemResult.""" + logger.info("Loading evaluation item result from GCS: %s", gcs_uri) + gcs_utils = _evals_utils.GcsUtils(api_client=api_client) + try: + eval_item_data = json.loads(gcs_utils.read_file_contents(gcs_uri)) + return types.EvaluationItemResult(**eval_item_data) + except Exception as e: + logger.error( + "Failed to load evaluation result from GCS: %s. Error: %s", gcs_uri, e + ) + return types.EvaluationItemResult() + + +def _convert_gcs_to_evaluation_item_request( + api_client: BaseApiClient, + gcs_uri: str, +) -> types.EvaluationItemRequest: + """Converts a json file to an EvaluationItemRequest.""" + logger.info("Loading evaluation item request from GCS: %s", gcs_uri) + gcs_utils = _evals_utils.GcsUtils(api_client=api_client) + try: + eval_item_data = json.loads(gcs_utils.read_file_contents(gcs_uri)) + return types.EvaluationItemRequest(**eval_item_data) + except Exception as e: + logger.error( + "Failed to load evaluation request from GCS: %s. Error: %s", gcs_uri, e + ) + return types.EvaluationItemRequest() diff --git a/vertexai/_genai/_evals_metric_handlers.py b/vertexai/_genai/_evals_metric_handlers.py index 8f97f37f92..d431e9b742 100644 --- a/vertexai/_genai/_evals_metric_handlers.py +++ b/vertexai/_genai/_evals_metric_handlers.py @@ -462,15 +462,25 @@ def _build_rubric_based_input( "must be a dictionary." ) - rubrics_list = rubric_groups_data.get(self.metric.rubric_group_name, []) + rubric_group_from_data = rubric_groups_data.get( + self.metric.rubric_group_name, {} + ) + if isinstance(rubric_group_from_data, dict): + rubrics_list = rubric_group_from_data.get("rubrics", []) + else: + rubrics_list = [] + if not isinstance(rubrics_list, list): logger.warning( - "Rubric group '%s' in 'rubric_groups' is not a list for case %s.", + "Rubrics for group '%s' in case %s is not a list: %s. 
" + "Skipping rubrics for this case.", self.metric.rubric_group_name, eval_case.eval_case_id, + rubrics_list, ) rubrics_list = [] + parsed_rubrics = [types.Rubric(**r) for r in rubrics_list] rubric_enhanced_contents = { "prompt": ( [eval_case.prompt.model_dump(mode="json", exclude_none=True)] @@ -481,8 +491,8 @@ def _build_rubric_based_input( "rubric_groups": { self.metric.rubric_group_name: { "rubrics": [ - r.model_dump(mode="json") if isinstance(r, types.Rubric) else r - for r in rubrics_list + r.model_dump(mode="json", exclude_none=True) + for r in parsed_rubrics ] } }, diff --git a/vertexai/_genai/_evals_visualization.py b/vertexai/_genai/_evals_visualization.py index 477b6b40ee..aa2a41c6f3 100644 --- a/vertexai/_genai/_evals_visualization.py +++ b/vertexai/_genai/_evals_visualization.py @@ -727,3 +727,42 @@ def display_evaluation_dataset(eval_dataset_obj: types.EvaluationDataset) -> Non dataframe_json_string = json.dumps(processed_rows, ensure_ascii=False, default=str) html_content = _get_inference_html(dataframe_json_string) display.display(display.HTML(html_content)) + + +def _get_eval_result_from_eval_run( + results: types.EvaluationRunResults, +) -> types.EvaluationResult: + """Retrieves an EvaluationResult from the resource name.""" + if ( + not results + or not results.summary_metrics + or not results.summary_metrics.metrics + ): + return types.EvaluationResult() + + aggregated_metrics_dict = {} + for name, value in results.summary_metrics.metrics.items(): + result = name.rsplit("/", 1) + full_metric_name = result[0] + aggregated_metric_name = result[1] + if full_metric_name not in aggregated_metrics_dict: + aggregated_metrics_dict[full_metric_name] = {} + aggregated_metrics_dict[full_metric_name]["sub_metric_name"] = ( + full_metric_name.split("/")[-1] + ) + aggregated_metrics_dict[full_metric_name][aggregated_metric_name] = value + + items_sorted = sorted( + aggregated_metrics_dict.items(), + key=lambda item: (item[1]["sub_metric_name"], item[0]), + ) + + aggregated_metrics = [ + types.AggregatedMetricResult( + metric_name=name, + mean_score=values.get("AVERAGE"), + stdev_score=values.get("STANDARD_DEVIATION"), + ) + for name, values in items_sorted + ] + return types.EvaluationResult(summary_metrics=aggregated_metrics) diff --git a/vertexai/_genai/_observability_data_converter.py b/vertexai/_genai/_observability_data_converter.py index 9eb43ae411..f7e7f11a08 100644 --- a/vertexai/_genai/_observability_data_converter.py +++ b/vertexai/_genai/_observability_data_converter.py @@ -28,6 +28,26 @@ logger = logging.getLogger("vertexai_genai._observability_data_converters") +def _load_jsonl(data: Any, case_id: str) -> list[dict[Any, Any]]: + """Parses the raw JSONL data into a list of dict possible.""" + if isinstance(data, str): + json_list = [] + for line in data.splitlines(): + loaded_json = json.loads(line) + if not isinstance(loaded_json, dict): + raise TypeError( + f"Decoded JSON payload is not a dict for case " + f"{case_id}. Type found: {type(loaded_json).__name__}" + ) + json_list.append(loaded_json) + return json_list + else: + raise TypeError( + f"Payload is not a JSONL string for case {case_id}. 
Type " + f"found: {type(data).__name__}" + ) + + class ObservabilityDataConverter(_evals_utils.EvalDataConverter): """Converter for dataset in GCP Observability GenAI format.""" @@ -131,44 +151,6 @@ def _parse_messages( reference=None, ) - def _load_json_dict(self, data: Any, case_id: str) -> dict[Any, str]: - """Parses the raw data into a dict if possible.""" - if isinstance(data, str): - loaded_json = json.loads(data) - if isinstance(loaded_json, dict): - return loaded_json - else: - raise TypeError( - f"Decoded JSON payload is not a dictionary for case " - f"{case_id}. Type found: {type(loaded_json).__name__}" - ) - elif isinstance(data, dict): - return data - else: - raise TypeError( - f"Payload is not a dictionary for case {case_id}. Type found: " - f"{type(data).__name__}" - ) - - def _load_json_list(self, data: Any, case_id: str) -> list[Any]: - """Parses the raw data into a list if possible.""" - if isinstance(data, str): - loaded_json = json.loads(data) - if isinstance(loaded_json, list): - return loaded_json - else: - raise TypeError( - f"Decoded JSON payload is not a list for case " - f"{case_id}. Type found: {type(loaded_json).__name__}" - ) - elif isinstance(data, list): - return data - else: - raise TypeError( - f"Payload is not a list for case {case_id}. Type found: " - f"{type(data).__name__}" - ) - @override def convert(self, raw_data: list[dict[str, Any]]) -> types.EvaluationDataset: """Converts a list of GCP Observability GenAI cases into an EvaluationDataset.""" @@ -185,15 +167,16 @@ def convert(self, raw_data: list[dict[str, Any]]) -> types.EvaluationDataset: continue request_data = case.get("request", []) - request_list = self._load_json_list(request_data, eval_case_id) + request_list = _load_jsonl(request_data, eval_case_id) response_data = case.get("response", []) - response_list = self._load_json_list(response_data, eval_case_id) + response_list = _load_jsonl(response_data, eval_case_id) system_dict = None if "system_instruction" in case: system_data = case.get("system_instruction", {}) - system_dict = self._load_json_dict(system_data, eval_case_id) + system_list = _load_jsonl(system_data, eval_case_id) + system_dict = system_list[0] if system_list else {} eval_case = self._parse_messages( eval_case_id, request_list, response_list, system_dict diff --git a/vertexai/_genai/agent_engines.py b/vertexai/_genai/agent_engines.py index b45edab806..34083d890e 100644 --- a/vertexai/_genai/agent_engines.py +++ b/vertexai/_genai/agent_engines.py @@ -318,7 +318,7 @@ def _create( self._api_client._verify_response(return_value) return return_value - def delete( + def _delete( self, *, name: str, @@ -725,6 +725,34 @@ def get( self._register_api_methods(agent_engine=agent_engine) return agent_engine + def delete( + self, + *, + name: str, + force: Optional[bool] = None, + config: Optional[types.DeleteAgentEngineConfigOrDict] = None, + ) -> types.DeleteAgentEngineOperation: + """ + Delete an Agent Engine resource. + + Args: + name (str): + Required. The name of the Agent Engine to be deleted. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}` + or `reasoningEngines/{resource_id}`. + force (bool): + Optional. If set to True, child resources will also be deleted. + Otherwise, the request will fail with FAILED_PRECONDITION error when + the Agent Engine has undeleted child resources. Defaults to False. + config (DeleteAgentEngineConfig): + Optional. Additional configurations for deleting the Agent Engine. 
+ + """ + logger.info(f"Deleting AgentEngine resource: {name}") + operation = self._delete(name=name, force=force, config=config) + logger.info(f"Started AgentEngine delete operation: {operation.name}") + return operation + def create( self, *, @@ -1659,7 +1687,7 @@ async def _create( self._api_client._verify_response(return_value) return return_value - async def delete( + async def _delete( self, *, name: str, @@ -1994,6 +2022,34 @@ async def _update( _memories = None _sessions = None + async def delete( + self, + *, + name: str, + force: Optional[bool] = None, + config: Optional[types.DeleteAgentEngineConfigOrDict] = None, + ) -> types.DeleteAgentEngineOperation: + """ + Delete an Agent Engine resource. + + Args: + name (str): + Required. The name of the Agent Engine to be deleted. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}` + or `reasoningEngines/{resource_id}`. + force (bool): + Optional. If set to True, child resources will also be deleted. + Otherwise, the request will fail with FAILED_PRECONDITION error when + the Agent Engine has undeleted child resources. Defaults to False. + config (DeleteAgentEngineConfig): + Optional. Additional configurations for deleting the Agent Engine. + + """ + logger.info(f"Deleting AgentEngine resource: {name}") + operation = await self._delete(name=name, force=force, config=config) + logger.info(f"Started AgentEngine delete operation: {operation.name}") + return operation + @property def memories(self): if self._memories is None: diff --git a/vertexai/_genai/evals.py b/vertexai/_genai/evals.py index 5e792d1936..78ddccd49f 100644 --- a/vertexai/_genai/evals.py +++ b/vertexai/_genai/evals.py @@ -173,6 +173,20 @@ def _GenerateInstanceRubricsRequest_to_vertex( return to_object +def _GetEvaluationItemParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + def _GetEvaluationRunParameters_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -187,6 +201,20 @@ def _GetEvaluationRunParameters_to_vertex( return to_object +def _GetEvaluationSetParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + def _RubricBasedMetricInput_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -527,6 +555,106 @@ def _get_evaluation_run( self._api_client._verify_response(return_value) return return_value + def _get_evaluation_set( + self, *, name: str, config: Optional[types.GetEvaluationSetConfigOrDict] = None + ) -> types.EvaluationSet: + """ + Retrieves an EvaluationSet from the resource name. 
+ """ + + parameter_model = types._GetEvaluationSetParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationSetParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationSets/{name}".format_map(request_url_dict) + else: + path = "evaluationSets/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("get", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationSet._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + def _get_evaluation_item( + self, *, name: str, config: Optional[types.GetEvaluationItemConfigOrDict] = None + ) -> types.EvaluationItem: + """ + Retrieves an EvaluationItem from the resource name. + """ + + parameter_model = types._GetEvaluationItemParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationItemParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationItems/{name}".format_map(request_url_dict) + else: + path = "evaluationItems/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("get", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationItem._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + def run(self) -> types.EvaluateInstancesResponse: """Evaluates an instance of a model. @@ -955,6 +1083,80 @@ def create_evaluation_run( config=config, ) + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_set method is experimental, " + "and may change in future versions." + ) + def get_evaluation_set( + self, + *, + name: str, + config: Optional[types.GetEvaluationSetConfigOrDict] = None, + ) -> types.EvaluationSet: + """Retrieves an EvaluationSet from the resource name. + + Args: + name: The resource name of the EvaluationSet. 
Format: + `projects/{project}/locations/{location}/evaluationSets/{evaluation_set}` + config: The optional configuration for the evaluation set. Must be a dict or + `types.GetEvaluationSetConfigOrDict` type. + + Returns: + The evaluation set. + """ + + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + return self._get_evaluation_set(name=name, config=config) + + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_item method is experimental, " + "and may change in future versions." + ) + def get_evaluation_item( + self, + *, + name: str, + config: Optional[types.GetEvaluationItemConfigOrDict] = None, + ) -> types.EvaluationItem: + """Retrieves an EvaluationItem from the resource name. + + Args: + name: The resource name of the EvaluationItem. Format: + `projects/{project}/locations/{location}/evaluationItems/{evaluation_item}` + config: The optional configuration for the evaluation item. Must be a dict or + `types.GetEvaluationItemConfigOrDict` type. + + Returns: + The evaluation item. + """ + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + result = self._get_evaluation_item(name=name, config=config) + if ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.RESULT + ): + result.evaluation_response = ( + _evals_common._convert_gcs_to_evaluation_item_result( + self._api_client, result.gcs_uri + ) + ) + elif ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.REQUEST + ): + result.evaluation_request = ( + _evals_common._convert_gcs_to_evaluation_item_request( + self._api_client, result.gcs_uri + ) + ) + return result + class AsyncEvals(_api_module.BaseModule): @@ -1217,6 +1419,110 @@ async def _get_evaluation_run( self._api_client._verify_response(return_value) return return_value + async def _get_evaluation_set( + self, *, name: str, config: Optional[types.GetEvaluationSetConfigOrDict] = None + ) -> types.EvaluationSet: + """ + Retrieves an EvaluationSet from the resource name. + """ + + parameter_model = types._GetEvaluationSetParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationSetParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationSets/{name}".format_map(request_url_dict) + else: + path = "evaluationSets/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationSet._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + async def _get_evaluation_item( + self, *, name: str, config: Optional[types.GetEvaluationItemConfigOrDict] = None + ) -> types.EvaluationItem: + """ + Retrieves an EvaluationItem from the resource name. + """ + + parameter_model = types._GetEvaluationItemParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationItemParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationItems/{name}".format_map(request_url_dict) + else: + path = "evaluationItems/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationItem._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + async def batch_evaluate( self, *, @@ -1355,3 +1661,79 @@ async def create_evaluation_run( ) return result + + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_set method is experimental, " + "and may change in future versions." + ) + async def get_evaluation_set( + self, + *, + name: str, + config: Optional[types.GetEvaluationSetConfigOrDict] = None, + ) -> types.EvaluationSet: + """Retrieves an EvaluationSet from the resource name. + + Args: + name: The resource name of the EvaluationSet. Format: + `projects/{project}/locations/{location}/evaluationSets/{evaluation_set}` + config: The optional configuration for the evaluation set. Must be a dict or + `types.GetEvaluationSetConfigOrDict` type. + + Returns: + The evaluation set. + """ + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + result = await self._get_evaluation_set(name=name, config=config) + + return result + + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_item method is experimental, " + "and may change in future versions." 
+ ) + async def get_evaluation_item( + self, + *, + name: str, + config: Optional[types.GetEvaluationItemConfigOrDict] = None, + ) -> types.EvaluationItem: + """Retrieves an EvaluationItem from the resource name. + + Args: + name: The resource name of the EvaluationItem. Format: + `projects/{project}/locations/{location}/evaluationItems/{evaluation_item}` + config: The optional configuration for the evaluation item. Must be a dict or + `types.GetEvaluationItemConfigOrDict` type. + + Returns: + The evaluation item. + """ + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + result = await self._get_evaluation_item(name=name, config=config) + if ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.RESULT + ): + result.evaluation_response = ( + _evals_common._convert_gcs_to_evaluation_item_result( + self._api_client, result.gcs_uri + ) + ) + elif ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.REQUEST + ): + result.evaluation_request = ( + _evals_common._convert_gcs_to_evaluation_item_request( + self._api_client, result.gcs_uri + ) + ) + + return result diff --git a/vertexai/_genai/memories.py b/vertexai/_genai/memories.py index 83eff3588f..2497b1d167 100644 --- a/vertexai/_genai/memories.py +++ b/vertexai/_genai/memories.py @@ -16,6 +16,7 @@ # Code generated by the Google Gen AI SDK generator DO NOT EDIT. import functools +import importlib import json import logging from typing import Any, Iterator, Optional, Union @@ -97,6 +98,25 @@ def _DeleteAgentEngineMemoryRequestParameters_to_vertex( return to_object +def _GenerateAgentEngineMemoriesConfig_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + + if getv(from_object, ["disable_consolidation"]) is not None: + setv( + parent_object, + ["disableConsolidation"], + getv(from_object, ["disable_consolidation"]), + ) + + if getv(from_object, ["revision_labels"]) is not None: + setv(parent_object, ["revisionLabels"], getv(from_object, ["revision_labels"])) + + return to_object + + def _GenerateAgentEngineMemoriesRequestParameters_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -130,7 +150,13 @@ def _GenerateAgentEngineMemoriesRequestParameters_to_vertex( setv(to_object, ["scope"], getv(from_object, ["scope"])) if getv(from_object, ["config"]) is not None: - setv(to_object, ["config"], getv(from_object, ["config"])) + setv( + to_object, + ["config"], + _GenerateAgentEngineMemoriesConfig_to_vertex( + getv(from_object, ["config"]), to_object + ), + ) return to_object @@ -253,6 +279,23 @@ def _RetrieveAgentEngineMemoriesRequestParameters_to_vertex( return to_object +def _RollbackAgentEngineMemoryRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["target_revision_id"]) is not None: + setv(to_object, ["targetRevisionId"], getv(from_object, ["target_revision_id"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + def _UpdateAgentEngineMemoryConfig_to_vertex( from_object: Union[dict[str, Any], object], 
parent_object: Optional[dict[str, Any]] = None, @@ -781,6 +824,63 @@ def _retrieve( self._api_client._verify_response(return_value) return return_value + def _rollback( + self, + *, + name: str, + target_revision_id: str, + config: Optional[types.RollbackAgentEngineMemoryConfigOrDict] = None, + ) -> types.AgentEngineRollbackMemoryOperation: + """ + Rollback a memory to a previous revision. + """ + + parameter_model = types._RollbackAgentEngineMemoryRequestParameters( + name=name, + target_revision_id=target_revision_id, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _RollbackAgentEngineMemoryRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}:rollback".format_map(request_url_dict) + else: + path = "{name}:rollback" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("post", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.AgentEngineRollbackMemoryOperation._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + def _update( self, *, @@ -840,6 +940,25 @@ def _update( self._api_client._verify_response(return_value) return return_value + _revisions = None + + @property + def revisions(self): + if self._revisions is None: + try: + # We need to lazy load the revisions module to handle the + # possibility of ImportError when dependencies are not installed. + self._revisions = importlib.import_module( + ".memory_revisions", __package__ + ) + except ImportError as e: + raise ImportError( + "The 'agent_engines.memories.revisions' module requires " + "additional packages. Please install them using pip install " + "google-cloud-aiplatform[agent_engines]" + ) from e + return self._revisions.MemoryRevisions(self._api_client) + def create( self, *, @@ -933,6 +1052,10 @@ def generate( AgentEngineGenerateMemoriesOperation: The operation for generating the memories. 
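+ + Example (an illustrative sketch; the resource name and event text are placeholders, `types` is `vertexai.types` and `genai_types` is `google.genai.types`): + + operation = client.agent_engines.memories.generate( + name="projects/my-project/locations/us-central1/reasoningEngines/123", + scope={"user_id": "user-1"}, + direct_contents_source=types.GenerateMemoriesRequestDirectContentsSource( + events=[ + types.GenerateMemoriesRequestDirectContentsSourceEvent( + content=genai_types.Content( + role="user", + parts=[genai_types.Part(text="I prefer window seats.")], + ) + ) + ] + ), + )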
""" + if config is None: + config = types.GenerateAgentEngineMemoriesConfig() + elif isinstance(config, dict): + config = types.GenerateAgentEngineMemoriesConfig.model_validate(config) operation = self._generate( name=name, vertex_session_source=vertex_session_source, @@ -941,10 +1064,6 @@ def generate( scope=scope, config=config, ) - if config is None: - config = types.GenerateAgentEngineMemoriesConfig() - elif isinstance(config, dict): - config = types.GenerateAgentEngineMemoriesConfig.model_validate(config) if config.wait_for_completion and not operation.done: operation = _agent_engines_utils._await_operation( operation_name=operation.name, @@ -1033,6 +1152,46 @@ def retrieve( config, ) + def rollback( + self, + *, + name: str, + target_revision_id: str, + config: Optional[types.RollbackAgentEngineMemoryConfigOrDict] = None, + ) -> types.AgentEngineRollbackMemoryOperation: + """Rolls back a memory to a previous revision. + + Args: + name (str): + Required. The name of the memory to rollback. + target_revision_id (str): + Required. The revision ID to roll back to + config (RollbackAgentEngineMemoryConfig): + Optional. The configuration for the rollback. + + Returns: + AgentEngineRollbackMemoryOperation: + The operation for rolling back the memory. + """ + if config is None: + config = types.RollbackAgentEngineMemoryConfig() + elif isinstance(config, dict): + config = types.RollbackAgentEngineMemoryConfig.model_validate(config) + operation = self._rollback( + name=name, + target_revision_id=target_revision_id, + config=config, + ) + if config.wait_for_completion and not operation.done: + operation = _agent_engines_utils._await_operation( + operation_name=operation.name, + get_operation_fn=self._get_rollback_memory_operation, + poll_interval_seconds=0.5, + ) + if operation.error: + raise RuntimeError(f"Failed to rollback memory: {operation.error}") + return operation + class AsyncMemories(_api_module.BaseModule): @@ -1526,6 +1685,65 @@ async def _retrieve( self._api_client._verify_response(return_value) return return_value + async def _rollback( + self, + *, + name: str, + target_revision_id: str, + config: Optional[types.RollbackAgentEngineMemoryConfigOrDict] = None, + ) -> types.AgentEngineRollbackMemoryOperation: + """ + Rollback a memory to a previous revision. + """ + + parameter_model = types._RollbackAgentEngineMemoryRequestParameters( + name=name, + target_revision_id=target_revision_id, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _RollbackAgentEngineMemoryRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}:rollback".format_map(request_url_dict) + else: + path = "{name}:rollback" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "post", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.AgentEngineRollbackMemoryOperation._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + async def _update( self, *, diff --git a/vertexai/_genai/memory_revisions.py b/vertexai/_genai/memory_revisions.py new file mode 100644 index 0000000000..53541c0b4c --- /dev/null +++ b/vertexai/_genai/memory_revisions.py @@ -0,0 +1,382 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +import functools +import json +import logging +from typing import Any, Iterator, Optional, Union +from urllib.parse import urlencode + +from google.genai import _api_module +from google.genai import _common +from google.genai._common import get_value_by_path as getv +from google.genai._common import set_value_by_path as setv +from google.genai.pagers import Pager + +from . 
import types + + +logger = logging.getLogger("vertexai_genai.memoryrevisions") + +logger.setLevel(logging.INFO) + + +def _GetAgentEngineMemoryRevisionRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + +def _ListAgentEngineMemoryRevisionsConfig_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + + if getv(from_object, ["page_size"]) is not None: + setv(parent_object, ["_query", "pageSize"], getv(from_object, ["page_size"])) + + if getv(from_object, ["page_token"]) is not None: + setv(parent_object, ["_query", "pageToken"], getv(from_object, ["page_token"])) + + if getv(from_object, ["filter"]) is not None: + setv(parent_object, ["_query", "filter"], getv(from_object, ["filter"])) + + return to_object + + +def _ListAgentEngineMemoryRevisionsRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv( + to_object, + ["config"], + _ListAgentEngineMemoryRevisionsConfig_to_vertex( + getv(from_object, ["config"]), to_object + ), + ) + + return to_object + + +class MemoryRevisions(_api_module.BaseModule): + + def get( + self, + *, + name: str, + config: Optional[types.GetAgentEngineMemoryRevisionConfigOrDict] = None, + ) -> types.MemoryRevision: + """ + Gets an agent engine memory revision. + + Args: + name (str): Required. The name of the Agent Engine memory revision to get. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/memories/{memory_id}/revisions/{revision_id}`. + config (GetAgentEngineMemoryRevisionConfig): + Optional. Additional configurations for getting the Agent Engine memory revision. + + Returns: + AgentEngineMemoryRevision: The requested Agent Engine memory revision. + + """ + + parameter_model = types._GetAgentEngineMemoryRevisionRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetAgentEngineMemoryRevisionRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}".format_map(request_url_dict) + else: + path = "{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
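+        # As in memories.py, `config` holds client-side options only and must
+        # not be serialized into the request body.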
+        request_dict.pop("config", None)
+
+        http_options: Optional[types.HttpOptions] = None
+        if (
+            parameter_model.config is not None
+            and parameter_model.config.http_options is not None
+        ):
+            http_options = parameter_model.config.http_options
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+
+        response = self._api_client.request("get", path, request_dict, http_options)
+
+        response_dict = {} if not response.body else json.loads(response.body)
+
+        return_value = types.MemoryRevision._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
+    def _list(
+        self,
+        *,
+        name: str,
+        config: Optional[types.ListAgentEngineMemoryRevisionsConfigOrDict] = None,
+    ) -> types.ListAgentEngineMemoryRevisionsResponse:
+        """
+        Lists Agent Engine memory revisions.
+
+        Args:
+            name (str): Required. The name of the Agent Engine memory to list revisions for. Format:
+                `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/memories/{memory_id}`.
+            config (ListAgentEngineMemoryRevisionsConfig):
+                Optional. Additional configurations for listing the Agent Engine memory revisions.
+
+        Returns:
+            ListAgentEngineMemoryRevisionsResponse: The requested Agent Engine memory revisions.
+
+        """
+
+        parameter_model = types._ListAgentEngineMemoryRevisionsRequestParameters(
+            name=name,
+            config=config,
+        )
+
+        request_url_dict: Optional[dict[str, str]]
+        if not self._api_client.vertexai:
+            raise ValueError("This method is only supported in the Vertex AI client.")
+        else:
+            request_dict = _ListAgentEngineMemoryRevisionsRequestParameters_to_vertex(
+                parameter_model
+            )
+            request_url_dict = request_dict.get("_url")
+            if request_url_dict:
+                path = "{name}/revisions".format_map(request_url_dict)
+            else:
+                path = "{name}/revisions"
+
+        query_params = request_dict.get("_query")
+        if query_params:
+            path = f"{path}?{urlencode(query_params)}"
+        # TODO: remove the hack that pops config.
+        request_dict.pop("config", None)
+
+        http_options: Optional[types.HttpOptions] = None
+        if (
+            parameter_model.config is not None
+            and parameter_model.config.http_options is not None
+        ):
+            http_options = parameter_model.config.http_options
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+
+        response = self._api_client.request("get", path, request_dict, http_options)
+
+        response_dict = {} if not response.body else json.loads(response.body)
+
+        return_value = types.ListAgentEngineMemoryRevisionsResponse._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
+    def list(
+        self,
+        *,
+        name: str,
+        config: Optional[types.ListAgentEngineMemoryRevisionsConfigOrDict] = None,
+    ) -> Iterator[types.MemoryRevision]:
+        """Lists Agent Engine memory revisions.
+
+        Args:
+            name (str):
+                Required. The name of the Memory to list revisions for.
+            config (ListAgentEngineMemoryRevisionsConfigOrDict):
+                Optional. The configuration for the memory revisions to list.
+
+        Returns:
+            Iterable[MemoryRevision]: An iterable of memory revisions.
+ """ + + return Pager( + "memory_revisions", + functools.partial(self._list, name=name), + self._list(name=name, config=config), + config, + ) + + +class AsyncMemoryRevisions(_api_module.BaseModule): + + async def get( + self, + *, + name: str, + config: Optional[types.GetAgentEngineMemoryRevisionConfigOrDict] = None, + ) -> types.MemoryRevision: + """ + Gets an agent engine memory revision. + + Args: + name (str): Required. The name of the Agent Engine memory revision to get. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/memories/{memory_id}/revisions/{revision_id}`. + config (GetAgentEngineMemoryRevisionConfig): + Optional. Additional configurations for getting the Agent Engine memory revision. + + Returns: + AgentEngineMemoryRevision: The requested Agent Engine memory revision. + + """ + + parameter_model = types._GetAgentEngineMemoryRevisionRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetAgentEngineMemoryRevisionRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}".format_map(request_url_dict) + else: + path = "{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.MemoryRevision._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + async def _list( + self, + *, + name: str, + config: Optional[types.ListAgentEngineMemoryRevisionsConfigOrDict] = None, + ) -> types.ListAgentEngineMemoryRevisionsResponse: + """ + Lists Agent Engine memory revisions. + + Args: + name (str): Required. The name of the Agent Engine memory to list revisions for. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/memories/{memory_id}`. + config (ListAgentEngineMemoryRevisionsConfig): + Optional. Additional configurations for listing the Agent Engine memory revisions. + + Returns: + ListAgentEngineMemoryRevisionsResponse: The requested Agent Engine memory revisions. 
+ + """ + + parameter_model = types._ListAgentEngineMemoryRevisionsRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _ListAgentEngineMemoryRevisionsRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}/revisions".format_map(request_url_dict) + else: + path = "{name}/revisions" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.ListAgentEngineMemoryRevisionsResponse._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value diff --git a/vertexai/_genai/sandboxes.py b/vertexai/_genai/sandboxes.py index 8f177f6a70..666ca80121 100644 --- a/vertexai/_genai/sandboxes.py +++ b/vertexai/_genai/sandboxes.py @@ -15,10 +15,10 @@ # Code generated by the Google Gen AI SDK generator DO NOT EDIT. -import base64 import functools import json import logging +import mimetypes from typing import Any, Iterator, Optional, Union from urllib.parse import urlencode @@ -618,20 +618,52 @@ def execute_code( Returns: ExecuteSandboxEnvironmentResponse: The response from executing the code. """ - json_string = json.dumps(input_data) - - base64_bytes = base64.b64encode(json_string.encode("utf-8")) - base64_string = base64_bytes.decode("utf-8") + input_chunks = [] + + if input_data.get("code") is not None: + code = input_data.get("code", "") + json_code = json.dumps({"code": code}).encode("utf-8") + input_chunks.append( + types.Chunk( + mime_type="application/json", + data=json_code, + ) + ) - # Only single JSON input is supported for now. - inputs = [{"mime_type": "application/json", "data": base64_string}] + for file in input_data.get("files", []): + file_name = file.get("name", "") + input_chunks.append( + types.Chunk( + mime_type=file.get("mimeType", ""), + data=file.get("content", b""), + metadata={"attributes": {"file_name": file_name.encode("utf-8")}}, + ) + ) response = self._execute_code( name=name, - inputs=inputs, + inputs=input_chunks, config=config, ) + output_chunks = [] + for output in response.outputs: + if output.mime_type is None: + # if mime_type is not available, try to guess the mime_type from the file_name. 
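+                # The file name is stored as UTF-8 bytes under
+                # metadata.attributes["file_name"]; decode it first, then use
+                # mimetypes.guess_type for an extension-based guess.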
+ if ( + output.metadata is not None + and output.metadata.attributes is not None + ): + file_name = output.metadata.attributes.get("file_name", b"").decode( + "utf-8" + ) + mime_type, _ = mimetypes.guess_type(file_name) + output.mime_type = mime_type + + output_chunks.append(output) + + response = types.ExecuteSandboxEnvironmentResponse(outputs=output_chunks) + return response def get( diff --git a/vertexai/_genai/session_events.py b/vertexai/_genai/session_events.py index 1ce8284607..2f7a820c29 100644 --- a/vertexai/_genai/session_events.py +++ b/vertexai/_genai/session_events.py @@ -26,7 +26,7 @@ from google.genai import _common from google.genai._common import get_value_by_path as getv from google.genai._common import set_value_by_path as setv -from google.genai.pagers import Pager +from google.genai.pagers import AsyncPager, Pager from . import types @@ -142,7 +142,7 @@ def append( Appends Agent Engine session event. Args: - name (str): Required. The name of the Agent Engine session to append event for. Format: + name (str): Required. The name of the Agent Engine session to append the event to. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. author (str): Required. The author of the Agent Engine session event. invocation_id (str): Required. The invocation ID of the Agent Engine session event. @@ -313,7 +313,7 @@ async def append( Appends Agent Engine session event. Args: - name (str): Required. The name of the Agent Engine session to append event for. Format: + name (str): Required. The name of the Agent Engine session to append the event to. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. author (str): Required. The author of the Agent Engine session event. invocation_id (str): Required. The invocation ID of the Agent Engine session event. @@ -442,3 +442,32 @@ async def _list( self._api_client._verify_response(return_value) return return_value + + async def list( + self, + *, + name: str, + config: Optional[types.ListAgentEngineSessionEventsConfigOrDict] = None, + ) -> AsyncPager[types.SessionEvent]: + """Lists Agent Engine session events. + + Args: + name (str): Required. The name of the agent engine to list session + events for. + config (ListAgentEngineSessionEventsConfig): Optional. The configuration + for the session events to list. Currently, the `filter` field in + `config` only supports filtering by `timestamp`. The timestamp + value must be enclosed in double quotes and include the time zone + information. For example: + `config={'filter': 'timestamp>="2025-08-07T19:44:38.4Z"'}`. + + Returns: + AsyncPager[SessionEvent]: An async pager of session events. + """ + + return AsyncPager( + "session_events", + functools.partial(self._list, name=name), + await self._list(name=name, config=config), + config, + ) diff --git a/vertexai/_genai/sessions.py b/vertexai/_genai/sessions.py index 2588996022..c82550e59e 100644 --- a/vertexai/_genai/sessions.py +++ b/vertexai/_genai/sessions.py @@ -26,7 +26,7 @@ from google.genai import _common from google.genai._common import get_value_by_path as getv from google.genai._common import set_value_by_path as setv -from google.genai.pagers import Pager +from google.genai.pagers import AsyncPager, Pager from . import _agent_engines_utils from . import types @@ -225,7 +225,7 @@ def _create( Creates a new session in the Agent Engine. Args: - name (str): Required. The name of the Agent Engine session to be created. 
Format: + name (str): Required. The name of the Agent Engine to create the session under. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. user_id (str): Required. The user ID of the session. config (CreateAgentEngineSessionConfig): @@ -293,7 +293,7 @@ def delete( Args: name (str): Required. The name of the Agent Engine session to be deleted. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (DeleteAgentEngineSessionConfig): Optional. Additional configurations for deleting the Agent Engine session. @@ -358,7 +358,7 @@ def get( Args: name (str): Required. The name of the Agent Engine session to get. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (GetAgentEngineSessionConfig): Optional. Additional configurations for getting the Agent Engine session. @@ -539,7 +539,7 @@ def _update( Args: name (str): Required. The name of the Agent Engine session to be updated. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (UpdateAgentEngineSessionConfig): Optional. Additional configurations for updating the Agent Engine session. @@ -699,7 +699,7 @@ async def _create( Creates a new session in the Agent Engine. Args: - name (str): Required. The name of the Agent Engine session to be created. Format: + name (str): Required. The name of the Agent Engine to create the session under. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. user_id (str): Required. The user ID of the session. config (CreateAgentEngineSessionConfig): @@ -769,7 +769,7 @@ async def delete( Args: name (str): Required. The name of the Agent Engine session to be deleted. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (DeleteAgentEngineSessionConfig): Optional. Additional configurations for deleting the Agent Engine session. @@ -836,7 +836,7 @@ async def get( Args: name (str): Required. The name of the Agent Engine session to get. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (GetAgentEngineSessionConfig): Optional. Additional configurations for getting the Agent Engine session. @@ -1023,7 +1023,7 @@ async def _update( Args: name (str): Required. The name of the Agent Engine session to be updated. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (UpdateAgentEngineSessionConfig): Optional. Additional configurations for updating the Agent Engine session. @@ -1078,3 +1078,95 @@ async def _update( self._api_client._verify_response(return_value) return return_value + + _events = None + + @property + @_common.experimental_warning( + "The Vertex SDK GenAI agent_engines.sessions.events module is " + "experimental, and may change in future versions." 
+    )
+    def events(self):
+        if self._events is None:
+            try:
+                # We need to lazy load the sessions.events module to handle the
+                # possibility of ImportError when dependencies are not installed.
+                self._events = importlib.import_module(".session_events", __package__)
+            except ImportError as e:
+                raise ImportError(
+                    "The 'agent_engines.sessions.events' module requires "
+                    "additional packages. Please install them using pip install "
+                    "google-cloud-aiplatform[agent_engines]"
+                ) from e
+        return self._events.AsyncSessionEvents(self._api_client)
+
+    async def create(
+        self,
+        *,
+        name: str,
+        user_id: str,
+        config: Optional[types.CreateAgentEngineSessionConfigOrDict] = None,
+    ) -> types.AgentEngineSessionOperation:
+        """Creates a new session in the Agent Engine.
+
+        Args:
+            name (str):
+                Required. The name of the agent engine to create the session for.
+            user_id (str):
+                Required. The user ID of the session.
+            config (CreateAgentEngineSessionConfig):
+                Optional. The configuration for the session to create.
+
+        Returns:
+            AgentEngineSessionOperation: The operation for creating the session.
+        """
+        if config is None:
+            config = types.CreateAgentEngineSessionConfig()
+        elif isinstance(config, dict):
+            config = types.CreateAgentEngineSessionConfig.model_validate(config)
+        operation = await self._create(
+            name=name,
+            user_id=user_id,
+            config=config,
+        )
+        if config.wait_for_completion and not operation.done:
+            operation = await _agent_engines_utils._await_async_operation(
+                operation_name=operation.name,
+                get_operation_fn=self._get_session_operation,
+                poll_interval_seconds=0.5,
+            )
+        if operation.response:
+            operation.response = await self.get(name=operation.response.name)
+        elif operation.error:
+            raise RuntimeError(f"Failed to create session: {operation.error}")
+        else:
+            raise RuntimeError(
+                "Error retrieving session from the operation response. "
+                f"Operation name: {operation.name}"
+            )
+        return operation
+
+    async def list(
+        self,
+        *,
+        name: str,
+        config: Optional[types.ListAgentEngineSessionsConfigOrDict] = None,
+    ) -> AsyncPager[types.Session]:
+        """Lists Agent Engine sessions.
+
+        Args:
+            name (str): Required. The name of the agent engine to list sessions
+                for.
+            config (ListAgentEngineSessionsConfig): Optional. The configuration
+                for the sessions to list.
+
+        Returns:
+            AsyncPager[Session]: An async pager of sessions.
+        """
+
+        return AsyncPager(
+            "sessions",
+            functools.partial(self._list, name=name),
+            await self._list(name=name, config=config),
+            config,
+        )
diff --git a/vertexai/_genai/types.py b/vertexai/_genai/types.py
index 5e3aaa2e90..5763dda42b 100644
--- a/vertexai/_genai/types.py
+++ b/vertexai/_genai/types.py
@@ -333,6 +333,17 @@ class Importance(_common.CaseInSensitiveEnum):
     """Low importance."""
 
 
+class EvaluationItemType(_common.CaseInSensitiveEnum):
+    """The type of the EvaluationItem."""
+
+    EVALUATION_ITEM_TYPE_UNSPECIFIED = "EVALUATION_ITEM_TYPE_UNSPECIFIED"
+    """The default value. This value is unused."""
+    REQUEST = "REQUEST"
+    """The EvaluationItem is a request to evaluate."""
+    RESULT = "RESULT"
+    """The EvaluationItem is the result of evaluation."""
+
+
 class GenerateMemoriesResponseGeneratedMemoryAction(_common.CaseInSensitiveEnum):
     """The action to take."""
 
@@ -578,6 +589,20 @@ class EvaluationRun(_common.BaseModel):
         default=None, description="""The results for the evaluation run."""
     )
 
+    def show(self) -> None:
+        """Shows the evaluation result."""
+        from . 
import _evals_visualization + + if self.state == "SUCCEEDED": + eval_result = _evals_visualization._get_eval_result_from_eval_run( + self.evaluation_results + ) + _evals_visualization.display_evaluation_result(eval_result, None) + else: + logger.warning(f"Evaluation Run state: {self.state}.") + if self.error: + logger.warning(f"Evaluation Run error: {self.error.message}") + class EvaluationRunDict(TypedDict, total=False): """Represents an evaluation run.""" @@ -2695,6 +2720,453 @@ class _GetEvaluationRunParametersDict(TypedDict, total=False): ] +class GetEvaluationSetConfig(_common.BaseModel): + """Config for get evaluation set.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetEvaluationSetConfigDict(TypedDict, total=False): + """Config for get evaluation set.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + +GetEvaluationSetConfigOrDict = Union[GetEvaluationSetConfig, GetEvaluationSetConfigDict] + + +class _GetEvaluationSetParameters(_common.BaseModel): + """Represents a job that gets an evaluation set.""" + + name: Optional[str] = Field(default=None, description="""""") + config: Optional[GetEvaluationSetConfig] = Field(default=None, description="""""") + + +class _GetEvaluationSetParametersDict(TypedDict, total=False): + """Represents a job that gets an evaluation set.""" + + name: Optional[str] + """""" + + config: Optional[GetEvaluationSetConfigDict] + """""" + + +_GetEvaluationSetParametersOrDict = Union[ + _GetEvaluationSetParameters, _GetEvaluationSetParametersDict +] + + +class EvaluationSet(_common.BaseModel): + """Represents an evaluation set.""" + + name: Optional[str] = Field( + default=None, description="""The resource name of the evaluation set.""" + ) + display_name: Optional[str] = Field( + default=None, description="""The display name of the evaluation set.""" + ) + evaluation_items: Optional[list[str]] = Field( + default=None, + description="""The EvaluationItems that are part of this dataset.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, description="""The create time of the evaluation set.""" + ) + update_time: Optional[datetime.datetime] = Field( + default=None, description="""The update time of the evaluation set.""" + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, description="""The metadata of the evaluation set.""" + ) + + +class EvaluationSetDict(TypedDict, total=False): + """Represents an evaluation set.""" + + name: Optional[str] + """The resource name of the evaluation set.""" + + display_name: Optional[str] + """The display name of the evaluation set.""" + + evaluation_items: Optional[list[str]] + """The EvaluationItems that are part of this dataset.""" + + create_time: Optional[datetime.datetime] + """The create time of the evaluation set.""" + + update_time: Optional[datetime.datetime] + """The update time of the evaluation set.""" + + metadata: Optional[dict[str, Any]] + """The metadata of the evaluation set.""" + + +EvaluationSetOrDict = Union[EvaluationSet, EvaluationSetDict] + + +class GetEvaluationItemConfig(_common.BaseModel): + """Config for get evaluation item.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetEvaluationItemConfigDict(TypedDict, total=False): + """Config for get evaluation item.""" + + http_options: 
Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + +GetEvaluationItemConfigOrDict = Union[ + GetEvaluationItemConfig, GetEvaluationItemConfigDict +] + + +class _GetEvaluationItemParameters(_common.BaseModel): + """Represents a job that gets an evaluation item.""" + + name: Optional[str] = Field(default=None, description="""""") + config: Optional[GetEvaluationItemConfig] = Field(default=None, description="""""") + + +class _GetEvaluationItemParametersDict(TypedDict, total=False): + """Represents a job that gets an evaluation item.""" + + name: Optional[str] + """""" + + config: Optional[GetEvaluationItemConfigDict] + """""" + + +_GetEvaluationItemParametersOrDict = Union[ + _GetEvaluationItemParameters, _GetEvaluationItemParametersDict +] + + +class PromptTemplateData(_common.BaseModel): + """Message to hold a prompt template and the values to populate the template.""" + + values: Optional[dict[str, genai_types.Content]] = Field( + default=None, description="""The values for fields in the prompt template.""" + ) + + +class PromptTemplateDataDict(TypedDict, total=False): + """Message to hold a prompt template and the values to populate the template.""" + + values: Optional[dict[str, genai_types.ContentDict]] + """The values for fields in the prompt template.""" + + +PromptTemplateDataOrDict = Union[PromptTemplateData, PromptTemplateDataDict] + + +class EvaluationPrompt(_common.BaseModel): + """Represents the prompt to be evaluated.""" + + text: Optional[str] = Field(default=None, description="""Text prompt.""") + value: Optional[dict[str, Any]] = Field( + default=None, + description="""Fields and values that can be used to populate the prompt template.""", + ) + prompt_template_data: Optional[PromptTemplateData] = Field( + default=None, description="""Prompt template data.""" + ) + + +class EvaluationPromptDict(TypedDict, total=False): + """Represents the prompt to be evaluated.""" + + text: Optional[str] + """Text prompt.""" + + value: Optional[dict[str, Any]] + """Fields and values that can be used to populate the prompt template.""" + + prompt_template_data: Optional[PromptTemplateDataDict] + """Prompt template data.""" + + +EvaluationPromptOrDict = Union[EvaluationPrompt, EvaluationPromptDict] + + +class CandidateResponse(_common.BaseModel): + """Responses from model or agent.""" + + candidate: Optional[str] = Field( + default=None, + description="""The name of the candidate that produced the response.""", + ) + text: Optional[str] = Field(default=None, description="""The text response.""") + value: Optional[dict[str, Any]] = Field( + default=None, + description="""Fields and values that can be used to populate the response template.""", + ) + events: Optional[list[genai_types.Content]] = Field( + default=None, + description="""Intermediate events (such as tool calls and responses) that led to the final response.""", + ) + + +class CandidateResponseDict(TypedDict, total=False): + """Responses from model or agent.""" + + candidate: Optional[str] + """The name of the candidate that produced the response.""" + + text: Optional[str] + """The text response.""" + + value: Optional[dict[str, Any]] + """Fields and values that can be used to populate the response template.""" + + events: Optional[list[genai_types.ContentDict]] + """Intermediate events (such as tool calls and responses) that led to the final response.""" + + +CandidateResponseOrDict = Union[CandidateResponse, CandidateResponseDict] + + +class EvaluationItemRequest(_common.BaseModel): + 
"""Single evaluation request.""" + + prompt: Optional[EvaluationPrompt] = Field( + default=None, description="""The request/prompt to evaluate.""" + ) + golden_response: Optional[CandidateResponse] = Field( + default=None, description="""The ideal response or ground truth.""" + ) + rubrics: Optional[dict[str, "RubricGroup"]] = Field( + default=None, + description="""Named groups of rubrics associated with this prompt. The key is a user-defined name for the rubric group.""", + ) + candidate_responses: Optional[list[CandidateResponse]] = Field( + default=None, + description="""Responses from model under test and other baseline models for comparison.""", + ) + + +class EvaluationItemRequestDict(TypedDict, total=False): + """Single evaluation request.""" + + prompt: Optional[EvaluationPromptDict] + """The request/prompt to evaluate.""" + + golden_response: Optional[CandidateResponseDict] + """The ideal response or ground truth.""" + + rubrics: Optional[dict[str, "RubricGroupDict"]] + """Named groups of rubrics associated with this prompt. The key is a user-defined name for the rubric group.""" + + candidate_responses: Optional[list[CandidateResponseDict]] + """Responses from model under test and other baseline models for comparison.""" + + +EvaluationItemRequestOrDict = Union[EvaluationItemRequest, EvaluationItemRequestDict] + + +class CandidateResult(_common.BaseModel): + """Result for a single candidate.""" + + candidate: Optional[str] = Field( + default=None, + description="""The candidate that is being evaluated. The value is the same as the candidate name in the EvaluationRequest.""", + ) + metric: Optional[str] = Field( + default=None, description="""The metric that was evaluated.""" + ) + score: Optional[float] = Field( + default=None, description="""The score of the metric.""" + ) + explanation: Optional[str] = Field( + default=None, description="""The explanation for the metric.""" + ) + rubric_verdicts: Optional[list[RubricVerdict]] = Field( + default=None, description="""The rubric verdicts for the metric.""" + ) + additional_results: Optional[dict[str, Any]] = Field( + default=None, description="""Additional results for the metric.""" + ) + + +class CandidateResultDict(TypedDict, total=False): + """Result for a single candidate.""" + + candidate: Optional[str] + """The candidate that is being evaluated. 
The value is the same as the candidate name in the EvaluationRequest."""
+
+    metric: Optional[str]
+    """The metric that was evaluated."""
+
+    score: Optional[float]
+    """The score of the metric."""
+
+    explanation: Optional[str]
+    """The explanation for the metric."""
+
+    rubric_verdicts: Optional[list[RubricVerdictDict]]
+    """The rubric verdicts for the metric."""
+
+    additional_results: Optional[dict[str, Any]]
+    """Additional results for the metric."""
+
+
+CandidateResultOrDict = Union[CandidateResult, CandidateResultDict]
+
+
+class EvaluationItemResult(_common.BaseModel):
+    """Represents the result of an evaluation item."""
+
+    evaluation_request: Optional[str] = Field(
+        default=None, description="""The request item that was evaluated."""
+    )
+    evaluation_run: Optional[str] = Field(
+        default=None,
+        description="""The evaluation run that was used to generate the result.""",
+    )
+    request: Optional[EvaluationItemRequest] = Field(
+        default=None, description="""The request that was evaluated."""
+    )
+    metric: Optional[str] = Field(
+        default=None, description="""The metric that was evaluated."""
+    )
+    candidate_results: Optional[list[CandidateResult]] = Field(
+        default=None, description="""The results for the metric."""
+    )
+    metadata: Optional[dict[str, Any]] = Field(
+        default=None, description="""Metadata about the evaluation result."""
+    )
+
+
+class EvaluationItemResultDict(TypedDict, total=False):
+    """Represents the result of an evaluation item."""
+
+    evaluation_request: Optional[str]
+    """The request item that was evaluated."""
+
+    evaluation_run: Optional[str]
+    """The evaluation run that was used to generate the result."""
+
+    request: Optional[EvaluationItemRequestDict]
+    """The request that was evaluated."""
+
+    metric: Optional[str]
+    """The metric that was evaluated."""
+
+    candidate_results: Optional[list[CandidateResultDict]]
+    """The results for the metric."""
+
+    metadata: Optional[dict[str, Any]]
+    """Metadata about the evaluation result."""
+
+
+EvaluationItemResultOrDict = Union[EvaluationItemResult, EvaluationItemResultDict]
+
+
+class EvaluationItem(_common.BaseModel):
+    """EvaluationItem is a single evaluation request or result.
+
+    The content of an EvaluationItem is immutable - it cannot be updated once
+    created. EvaluationItems can be deleted when no longer needed.
+    """
+
+    name: Optional[str] = Field(
+        default=None, description="""The resource name of the EvaluationItem."""
+    )
+    display_name: Optional[str] = Field(
+        default=None, description="""The display name of the EvaluationItem."""
+    )
+    metadata: Optional[dict[str, Any]] = Field(
+        default=None, description="""Metadata for the EvaluationItem."""
+    )
+    labels: Optional[dict[str, str]] = Field(
+        default=None, description="""Labels for the EvaluationItem."""
+    )
+    evaluation_item_type: Optional[EvaluationItemType] = Field(
+        default=None, description="""The type of the EvaluationItem."""
+    )
+    evaluation_request: Optional[EvaluationItemRequest] = Field(
+        default=None, description="""The request to evaluate."""
+    )
+    evaluation_response: Optional[EvaluationItemResult] = Field(
+        default=None, description="""The response from evaluation."""
+    )
+    gcs_uri: Optional[str] = Field(
+        default=None,
+        description="""The Cloud Storage object where the request or response is stored.""",
+    )
+    create_time: Optional[datetime.datetime] = Field(
+        default=None, description="""Timestamp when this item was created."""
+    )
+    error: Optional[genai_types.GoogleRpcStatus] = Field(
+        default=None, description="""Error for the evaluation item."""
+    )
+
+    # TODO(b/448806531): Remove all the overridden _from_response methods once the
+    # ticket is resolved and published.
+    @classmethod
+    def _from_response(
+        cls: typing.Type["EvaluationItem"],
+        *,
+        response: dict[str, object],
+        kwargs: dict[str, object],
+    ) -> "EvaluationItem":
+        """Converts a dictionary response into an EvaluationItem object."""
+
+        response = _camel_key_to_snake(response)
+        result = super()._from_response(response=response, kwargs=kwargs)
+        return result
+
+
+class EvaluationItemDict(TypedDict, total=False):
+    """EvaluationItem is a single evaluation request or result.
+
+    The content of an EvaluationItem is immutable - it cannot be updated once
+    created. EvaluationItems can be deleted when no longer needed.
+    """
+
+    name: Optional[str]
+    """The resource name of the EvaluationItem."""
+
+    display_name: Optional[str]
+    """The display name of the EvaluationItem."""
+
+    metadata: Optional[dict[str, Any]]
+    """Metadata for the EvaluationItem."""
+
+    labels: Optional[dict[str, str]]
+    """Labels for the EvaluationItem."""
+
+    evaluation_item_type: Optional[EvaluationItemType]
+    """The type of the EvaluationItem."""
+
+    evaluation_request: Optional[EvaluationItemRequestDict]
+    """The request to evaluate."""
+
+    evaluation_response: Optional[EvaluationItemResultDict]
+    """The response from evaluation."""
+
+    gcs_uri: Optional[str]
+    """The Cloud Storage object where the request or response is stored."""
+
+    create_time: Optional[datetime.datetime]
+    """Timestamp when this item was created."""
+
+    error: Optional[genai_types.GoogleRpcStatusDict]
+    """Error for the evaluation item."""
+
+
+EvaluationItemOrDict = Union[EvaluationItem, EvaluationItemDict]
+
+
 class OptimizeConfig(_common.BaseModel):
     """Config for Prompt Optimizer."""
 
@@ -5334,6 +5806,10 @@ class GenerateAgentEngineMemoriesConfig(_common.BaseModel):
         default=True,
         description="""Waits for the operation to complete before returning.""",
     )
+    revision_labels: Optional[dict[str, str]] = Field(
+        default=None,
+        description="""Labels to apply to the memory revision. For example, you can use this to label a revision with its data source.""",
+    )
 
 
 class GenerateAgentEngineMemoriesConfigDict(TypedDict, total=False):
@@ -5353,6 +5829,9 @@ class GenerateAgentEngineMemoriesConfigDict(TypedDict, total=False):
     wait_for_completion: Optional[bool]
     """Waits for the operation to complete before returning."""
 
+    revision_labels: Optional[dict[str, str]]
+    """Labels to apply to the memory revision. For example, you can use this to label a revision with its data source."""
+
 
 GenerateAgentEngineMemoriesConfigOrDict = Union[
     GenerateAgentEngineMemoriesConfig, GenerateAgentEngineMemoriesConfigDict
@@ -5438,6 +5917,14 @@ class GenerateMemoriesResponseGeneratedMemory(_common.BaseModel):
     action: Optional[GenerateMemoriesResponseGeneratedMemoryAction] = Field(
         default=None, description="""The action to take."""
     )
+    previous_revision: Optional[str] = Field(
+        default=None,
+        description="""The previous revision of the Memory before the action was performed. This
+      field is only set if the action is `UPDATED` or `DELETED`. You can use
+      this to roll back the Memory to the previous revision, undoing the action.
+      Format:
+      `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/memories/{memory}/revisions/{revision}`""",
+    )
 
 
 class GenerateMemoriesResponseGeneratedMemoryDict(TypedDict, total=False):
@@ -5449,6 +5936,13 @@ class GenerateMemoriesResponseGeneratedMemoryDict(TypedDict, total=False):
     action: Optional[GenerateMemoriesResponseGeneratedMemoryAction]
     """The action to take."""
 
+    previous_revision: Optional[str]
+    """The previous revision of the Memory before the action was performed. This
+    field is only set if the action is `UPDATED` or `DELETED`. You can use
+    this to roll back the Memory to the previous revision, undoing the action.
+    Format:
+    `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/memories/{memory}/revisions/{revision}`"""
+
 
 GenerateMemoriesResponseGeneratedMemoryOrDict = Union[
     GenerateMemoriesResponseGeneratedMemory, GenerateMemoriesResponseGeneratedMemoryDict
@@ -5932,6 +6426,108 @@ class RetrieveMemoriesResponseDict(TypedDict, total=False):
 ]
 
 
+class RollbackAgentEngineMemoryConfig(_common.BaseModel):
+    """Config for rolling back a memory."""
+
+    http_options: Optional[genai_types.HttpOptions] = Field(
+        default=None, description="""Used to override HTTP request options."""
+    )
+    wait_for_completion: Optional[bool] = Field(
+        default=True,
+        description="""Waits for the operation to complete before returning.""",
+    )
+
+
+class RollbackAgentEngineMemoryConfigDict(TypedDict, total=False):
+    """Config for rolling back a memory."""
+
+    http_options: Optional[genai_types.HttpOptionsDict]
+    """Used to override HTTP request options."""
+
+    wait_for_completion: Optional[bool]
+    """Waits for the operation to complete before returning."""
+
+
+RollbackAgentEngineMemoryConfigOrDict = Union[
+    RollbackAgentEngineMemoryConfig, RollbackAgentEngineMemoryConfigDict
+]
+
+
+class _RollbackAgentEngineMemoryRequestParameters(_common.BaseModel):
+    """Parameters for rolling back an agent engine memory."""
+
+    name: Optional[str] = Field(
+        default=None, description="""Name of the agent engine memory to roll back."""
+    )
+    target_revision_id: Optional[str] = Field(
+        default=None, description="""The ID of the revision to roll back to."""
+    )
+    config: Optional[RollbackAgentEngineMemoryConfig] = Field(
+        default=None, description=""""""
+    )
+
+
+class _RollbackAgentEngineMemoryRequestParametersDict(TypedDict, total=False):
+    """Parameters for rolling back an agent engine memory."""
+
+    name: Optional[str]
+    """Name of the agent engine memory to roll back."""
+
+    target_revision_id: Optional[str]
+    """The ID of the revision to roll back to."""
+
+    config: Optional[RollbackAgentEngineMemoryConfigDict]
+    """"""
+
+
+_RollbackAgentEngineMemoryRequestParametersOrDict = Union[
+    _RollbackAgentEngineMemoryRequestParameters,
+    _RollbackAgentEngineMemoryRequestParametersDict,
+]
+
+
+class AgentEngineRollbackMemoryOperation(_common.BaseModel):
+    """Operation that rolls back a memory."""
+
+    name: Optional[str] = Field(
+        default=None,
+        description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""",
+    )
+    metadata: Optional[dict[str, Any]] = Field(
+        default=None,
+        description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.""",
+    )
+    done: Optional[bool] = Field(
+        default=None,
+        description="""If the value is `false`, it means the operation is still in progress. 
If `true`, the operation is completed, and either `error` or `response` is available.""",
+    )
+    error: Optional[dict[str, Any]] = Field(
+        default=None,
+        description="""The error result of the operation in case of failure or cancellation.""",
+    )
+
+
+class AgentEngineRollbackMemoryOperationDict(TypedDict, total=False):
+    """Operation that rolls back a memory."""
+
+    name: Optional[str]
+    """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""
+
+    metadata: Optional[dict[str, Any]]
+    """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any."""
+
+    done: Optional[bool]
+    """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."""
+
+    error: Optional[dict[str, Any]]
+    """The error result of the operation in case of failure or cancellation."""
+
+
+AgentEngineRollbackMemoryOperationOrDict = Union[
+    AgentEngineRollbackMemoryOperation, AgentEngineRollbackMemoryOperationDict
+]
+
+
 class UpdateAgentEngineMemoryConfig(_common.BaseModel):
     """Config for updating agent engine memory."""
 
@@ -6051,6 +6647,194 @@ class _UpdateAgentEngineMemoryRequestParametersDict(TypedDict, total=False):
 ]
 
 
+class GetAgentEngineMemoryRevisionConfig(_common.BaseModel):
+    """Config for getting an Agent Engine Memory Revision."""
+
+    http_options: Optional[genai_types.HttpOptions] = Field(
+        default=None, description="""Used to override HTTP request options."""
+    )
+
+
+class GetAgentEngineMemoryRevisionConfigDict(TypedDict, total=False):
+    """Config for getting an Agent Engine Memory Revision."""
+
+    http_options: Optional[genai_types.HttpOptionsDict]
+    """Used to override HTTP request options."""
+
+
+GetAgentEngineMemoryRevisionConfigOrDict = Union[
+    GetAgentEngineMemoryRevisionConfig, GetAgentEngineMemoryRevisionConfigDict
+]
+
+
+class _GetAgentEngineMemoryRevisionRequestParameters(_common.BaseModel):
+    """Parameters for getting an Agent Engine memory revision."""
+
+    name: Optional[str] = Field(
+        default=None, description="""Name of the Agent Engine memory revision."""
+    )
+    config: Optional[GetAgentEngineMemoryRevisionConfig] = Field(
+        default=None, description=""""""
+    )
+
+
+class _GetAgentEngineMemoryRevisionRequestParametersDict(TypedDict, total=False):
+    """Parameters for getting an Agent Engine memory revision."""
+
+    name: Optional[str]
+    """Name of the Agent Engine memory revision."""
+
+    config: Optional[GetAgentEngineMemoryRevisionConfigDict]
+    """"""
+
+
+_GetAgentEngineMemoryRevisionRequestParametersOrDict = Union[
+    _GetAgentEngineMemoryRevisionRequestParameters,
+    _GetAgentEngineMemoryRevisionRequestParametersDict,
+]
+
+
+class MemoryRevision(_common.BaseModel):
+    """A memory revision."""
+
+    name: Optional[str] = Field(
+        default=None,
+        description="""Identifier. The resource name of the Memory Revision. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/memories/{memory}/revisions/{memory_revision}`""",
+    )
+    create_time: Optional[datetime.datetime] = Field(
+        default=None,
+        description="""Output only. 
Timestamp when this Memory Revision was created.""", + ) + expire_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Timestamp of when this resource is considered expired.""", + ) + fact: Optional[str] = Field( + default=None, + description="""Output only. The fact of the Memory Revision. This corresponds to the `fact` field of the parent Memory at the time of revision creation.""", + ) + labels: Optional[dict[str, str]] = Field( + default=None, + description="""Output only. The labels of the Memory Revision. These labels are applied to the MemoryRevision when it is created based on `GenerateMemoriesRequest.revision_labels`.""", + ) + + +class MemoryRevisionDict(TypedDict, total=False): + """A memory revision.""" + + name: Optional[str] + """Identifier. The resource name of the Memory Revision. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/memories/{memory}/revisions/{memory_revision}`""" + + create_time: Optional[datetime.datetime] + """Output only. Timestamp when this Memory Revision was created.""" + + expire_time: Optional[datetime.datetime] + """Output only. Timestamp of when this resource is considered expired.""" + + fact: Optional[str] + """Output only. The fact of the Memory Revision. This corresponds to the `fact` field of the parent Memory at the time of revision creation.""" + + labels: Optional[dict[str, str]] + """Output only. The labels of the Memory Revision. These labels are applied to the MemoryRevision when it is created based on `GenerateMemoriesRequest.revision_labels`.""" + + +MemoryRevisionOrDict = Union[MemoryRevision, MemoryRevisionDict] + + +class ListAgentEngineMemoryRevisionsConfig(_common.BaseModel): + """Config for listing Agent Engine memory revisions.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + page_size: Optional[int] = Field(default=None, description="""""") + page_token: Optional[str] = Field(default=None, description="""""") + filter: Optional[str] = Field( + default=None, + description="""An expression for filtering the results of the request. + For field names both snake_case and camelCase are supported.""", + ) + + +class ListAgentEngineMemoryRevisionsConfigDict(TypedDict, total=False): + """Config for listing Agent Engine memory revisions.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + page_size: Optional[int] + """""" + + page_token: Optional[str] + """""" + + filter: Optional[str] + """An expression for filtering the results of the request. 
+ For field names both snake_case and camelCase are supported.""" + + +ListAgentEngineMemoryRevisionsConfigOrDict = Union[ + ListAgentEngineMemoryRevisionsConfig, ListAgentEngineMemoryRevisionsConfigDict +] + + +class _ListAgentEngineMemoryRevisionsRequestParameters(_common.BaseModel): + """Parameters for listing Agent Engine memory revisions.""" + + name: Optional[str] = Field( + default=None, description="""Name of the Agent Engine memory""" + ) + config: Optional[ListAgentEngineMemoryRevisionsConfig] = Field( + default=None, description="""""" + ) + + +class _ListAgentEngineMemoryRevisionsRequestParametersDict(TypedDict, total=False): + """Parameters for listing Agent Engine memory revisions.""" + + name: Optional[str] + """Name of the Agent Engine memory""" + + config: Optional[ListAgentEngineMemoryRevisionsConfigDict] + """""" + + +_ListAgentEngineMemoryRevisionsRequestParametersOrDict = Union[ + _ListAgentEngineMemoryRevisionsRequestParameters, + _ListAgentEngineMemoryRevisionsRequestParametersDict, +] + + +class ListAgentEngineMemoryRevisionsResponse(_common.BaseModel): + """Response for listing agent engine memory revisions.""" + + sdk_http_response: Optional[genai_types.HttpResponse] = Field( + default=None, description="""Used to retain the full HTTP response.""" + ) + next_page_token: Optional[str] = Field(default=None, description="""""") + memory_revisions: Optional[list[MemoryRevision]] = Field( + default=None, description="""List of memory revisions.""" + ) + + +class ListAgentEngineMemoryRevisionsResponseDict(TypedDict, total=False): + """Response for listing agent engine memory revisions.""" + + sdk_http_response: Optional[genai_types.HttpResponseDict] + """Used to retain the full HTTP response.""" + + next_page_token: Optional[str] + """""" + + memory_revisions: Optional[list[MemoryRevisionDict]] + """List of memory revisions.""" + + +ListAgentEngineMemoryRevisionsResponseOrDict = Union[ + ListAgentEngineMemoryRevisionsResponse, ListAgentEngineMemoryRevisionsResponseDict +] + + class SandboxEnvironmentSpecCodeExecutionEnvironment(_common.BaseModel): """The code execution environment with customized settings.""" @@ -6995,7 +7779,7 @@ class _GetAgentEngineSessionRequestParameters(_common.BaseModel): """Parameters for getting an agent engine session.""" name: Optional[str] = Field( - default=None, description="""Name of the agent engine.""" + default=None, description="""Name of the agent engine session.""" ) config: Optional[GetAgentEngineSessionConfig] = Field( default=None, description="""""" @@ -7006,7 +7790,7 @@ class _GetAgentEngineSessionRequestParametersDict(TypedDict, total=False): """Parameters for getting an agent engine session.""" name: Optional[str] - """Name of the agent engine.""" + """Name of the agent engine session.""" config: Optional[GetAgentEngineSessionConfigDict] """""" @@ -7265,10 +8049,6 @@ class EventActions(_common.BaseModel): default=None, description="""Optional. If set, the event transfers to the specified agent.""", ) - transfer_to_agent: Optional[bool] = Field( - default=None, - description="""Deprecated. If set, the event transfers to the specified agent.""", - ) class EventActionsDict(TypedDict, total=False): @@ -7292,9 +8072,6 @@ class EventActionsDict(TypedDict, total=False): transfer_agent: Optional[str] """Optional. If set, the event transfers to the specified agent.""" - transfer_to_agent: Optional[bool] - """Deprecated. 
If set, the event transfers to the specified agent."""
-
 
 EventActionsOrDict = Union[EventActions, EventActionsDict]
 
 
@@ -7510,7 +8287,7 @@ class ListAgentEngineSessionEventsConfigDict(TypedDict, total=False):
 
 
 class _ListAgentEngineSessionEventsRequestParameters(_common.BaseModel):
-    """Parameters for listing agent engines."""
+    """Parameters for listing agent engine session events."""
 
     name: Optional[str] = Field(
         default=None, description="""Name of the agent engine session."""
@@ -7521,7 +8298,7 @@ class _ListAgentEngineSessionEventsRequestParameters(_common.BaseModel):
 
 
 class _ListAgentEngineSessionEventsRequestParametersDict(TypedDict, total=False):
-    """Parameters for listing agent engines."""
+    """Parameters for listing agent engine session events."""
 
     name: Optional[str]
     """Name of the agent engine session."""
 
@@ -9926,8 +10703,8 @@ class EvalRunInferenceConfigDict(TypedDict, total=False):
 
 EvalRunInferenceConfigOrDict = Union[EvalRunInferenceConfig, EvalRunInferenceConfigDict]
 
 
-class AgentMetadata(_common.BaseModel):
-    """AgentMetadata for agent eval."""
+class AgentInfo(_common.BaseModel):
+    """The agent info of an agent, used for agent eval."""
 
     name: Optional[str] = Field(
         default=None, description="""Agent name, used as an identifier."""
@@ -9941,13 +10718,10 @@ class AgentMetadata(_common.BaseModel):
     tool_declarations: Optional[genai_types.ToolListUnion] = Field(
         default=None, description="""List of tools used by the Agent."""
     )
-    sub_agent_names: Optional[list[str]] = Field(
-        default=None, description="""List of sub-agent names."""
-    )
 
 
-class AgentMetadataDict(TypedDict, total=False):
-    """AgentMetadata for agent eval."""
+class AgentInfoDict(TypedDict, total=False):
+    """The agent info of an agent, used for agent eval."""
 
     name: Optional[str]
     """Agent name, used as an identifier."""
@@ -9961,11 +10735,8 @@ class AgentMetadataDict(TypedDict, total=False):
     tool_declarations: Optional[genai_types.ToolListUnionDict]
     """List of tools used by the Agent."""
 
-    sub_agent_names: Optional[list[str]]
-    """List of sub-agent names."""
-
 
-AgentMetadataOrDict = Union[AgentMetadata, AgentMetadataDict]
+AgentInfoOrDict = Union[AgentInfo, AgentInfoDict]
 
 
 class ContentMapContents(_common.BaseModel):
@@ -10197,11 +10968,11 @@ class EvalCase(_common.BaseModel):
     )
     intermediate_events: Optional[list[Event]] = Field(
         default=None,
-        description="""Intermediate events of a single turn in agent eval or intermediate events of the last turn for multi-turn agent eval.""",
+        description="""This field is experimental and may change in future versions. Intermediate events of a single turn in an agent run or intermediate events of the last turn of a multi-turn agent run.""",
     )
-    agent_metadata: Optional[dict[str, AgentMetadata]] = Field(
+    agent_info: Optional[AgentInfo] = Field(
         default=None,
-        description="""Agent metadata for agent eval, keyed by agent name. This can be extended for multi-agent evaluation.""",
+        description="""This field is experimental and may change in future versions. The agent info of the agent under evaluation. This can be extended for multi-agent evaluation.""",
     )
     # Allow extra fields to support custom metric prompts and stay backward compatible.
model_config = ConfigDict(frozen=True, extra="allow")
@@ -10232,10 +11003,10 @@ class EvalCaseDict(TypedDict, total=False):
     """Unique identifier for the evaluation case."""
 
     intermediate_events: Optional[list[EventDict]]
-    """Intermediate events of a single turn in agent eval or intermediate events of the last turn for multi-turn agent eval."""
+    """This field is experimental and may change in future versions. Intermediate events of a single turn in an agent run or intermediate events of the last turn of a multi-turn agent run."""
 
-    agent_metadata: Optional[dict[str, AgentMetadataDict]]
-    """Agent metadata for agent eval, keyed by agent name. This can be extended for multi-agent evaluation."""
+    agent_info: Optional[AgentInfoDict]
+    """This field is experimental and may change in future versions. The agent info of the agent under evaluation. This can be extended for multi-agent evaluation."""
 
 
 EvalCaseOrDict = Union[EvalCase, EvalCaseDict]
 
@@ -10604,6 +11375,34 @@ class EvaluationResultDict(TypedDict, total=False):
 
 EvaluationResultOrDict = Union[EvaluationResult, EvaluationResultDict]
 
 
+class SessionInput(_common.BaseModel):
+    """This class is experimental and may change in future versions.
+
+    Input to initialize a session and run an agent, used for agent evaluation.
+    """
+
+    user_id: Optional[str] = Field(default=None, description="""The user ID.""")
+    state: Optional[dict[str, str]] = Field(
+        default=None, description="""The state of the session."""
+    )
+
+
+class SessionInputDict(TypedDict, total=False):
+    """This class is experimental and may change in future versions.
+
+    Input to initialize a session and run an agent, used for agent evaluation.
+    """
+
+    user_id: Optional[str]
+    """The user ID."""
+
+    state: Optional[dict[str, str]]
+    """The state of the session."""
+
+
+SessionInputOrDict = Union[SessionInput, SessionInputDict]
+
+
 class WinRateStats(_common.BaseModel):
     """Statistics for win rates for a single metric."""
 
diff --git a/vertexai/agent_engines/templates/adk.py b/vertexai/agent_engines/templates/adk.py
index 4f1b0cbfaa..6ed30956f5 100644
--- a/vertexai/agent_engines/templates/adk.py
+++ b/vertexai/agent_engines/templates/adk.py
@@ -169,6 +169,9 @@ def __init__(self, **kwargs):
         self.user_id: Optional[str] = kwargs.get("user_id", _DEFAULT_USER_ID)
         # The user ID.
 
+        self.session_id: Optional[str] = kwargs.get("session_id")
+        # The session ID.
+
 
 class _StreamingRunResponse:
     """Response object for `streaming_agent_run_with_events` method.
@@ -181,6 +184,8 @@ def __init__(self, **kwargs):
         # List of generated events.
         self.artifacts: Optional[List[_Artifact]] = kwargs.get("artifacts")
         # List of artifacts belonging to the session.
+        self.session_id: Optional[str] = kwargs.get("session_id")
+        # The session ID.
diff --git a/vertexai/agent_engines/templates/adk.py b/vertexai/agent_engines/templates/adk.py
index 4f1b0cbfaa..6ed30956f5 100644
--- a/vertexai/agent_engines/templates/adk.py
+++ b/vertexai/agent_engines/templates/adk.py
@@ -169,6 +169,9 @@ def __init__(self, **kwargs):
         self.user_id: Optional[str] = kwargs.get("user_id", _DEFAULT_USER_ID)
         # The user ID.
 
+        self.session_id: Optional[str] = kwargs.get("session_id")
+        # The session ID.
+
 
 class _StreamingRunResponse:
     """Response object for `streaming_agent_run_with_events` method.
@@ -181,6 +184,8 @@ def __init__(self, **kwargs):
         # List of generated events.
         self.artifacts: Optional[List[_Artifact]] = kwargs.get("artifacts")
         # List of artifacts belonging to the session.
+        self.session_id: Optional[str] = kwargs.get("session_id")
+        # The session ID.
 
     def dump(self) -> Dict[str, Any]:
         from vertexai.agent_engines import _utils
@@ -194,6 +199,8 @@ def dump(self) -> Dict[str, Any]:
                 result["events"].append(event_dict)
         if self.artifacts:
             result["artifacts"] = [artifact.dump() for artifact in self.artifacts]
+        if self.session_id:
+            result["session_id"] = self.session_id
         return result
@@ -402,7 +409,10 @@ async def _init_session(
                 auth = _Authorization(**auth)
                 session_state[f"temp:{auth_id}"] = auth.access_token
 
-        session_id = f"temp_session_{random.randbytes(8).hex()}"
+        if request.session_id:
+            session_id = request.session_id
+        else:
+            session_id = f"temp_session_{random.randbytes(8).hex()}"
         session = await session_service.create_session(
             app_name=self._tmpl_attrs.get("app_name"),
             user_id=request.user_id,
@@ -450,7 +460,9 @@ async def _convert_response_events(
         """Converts the events to the streaming run response object."""
         import collections
 
-        result = _StreamingRunResponse(events=events, artifacts=[])
+        result = _StreamingRunResponse(
+            events=events, artifacts=[], session_id=session_id
+        )
 
         # Save the generated artifacts into the result object.
         artifact_versions = collections.defaultdict(list)
@@ -537,15 +549,27 @@ def set_up(self):
         if session_service_builder:
             self._tmpl_attrs["session_service"] = session_service_builder()
         elif "GOOGLE_CLOUD_AGENT_ENGINE_ID" in os.environ:
-            from google.adk.sessions.vertex_ai_session_service import (
-                VertexAiSessionService,
-            )
+            try:
+                from google.adk.sessions.vertex_ai_session_service import (
+                    VertexAiSessionService,
+                )
+
+                self._tmpl_attrs["session_service"] = VertexAiSessionService(
+                    project=project,
+                    location=location,
+                    agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
+                )
+            except ImportError:
+                from google.adk.sessions.vertex_ai_session_service_g3 import (
+                    VertexAiSessionService,
+                )
+
+                self._tmpl_attrs["session_service"] = VertexAiSessionService(
+                    project=project,
+                    location=location,
+                    agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
+                )
-
-            self._tmpl_attrs["session_service"] = VertexAiSessionService(
-                project=project,
-                location=location,
-                agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
-            )
         else:
             self._tmpl_attrs["session_service"] = InMemorySessionService()
@@ -680,22 +704,35 @@ async def streaming_agent_run_with_events(self, request_json: str):
         request = _StreamRunRequest(**json.loads(request_json))
         if not self._tmpl_attrs.get("in_memory_runner"):
             self.set_up()
-        if not self._tmpl_attrs.get("artifact_service"):
-            self.set_up()
         # Prepare the in-memory session.
         if not self._tmpl_attrs.get("in_memory_artifact_service"):
             self.set_up()
         if not self._tmpl_attrs.get("in_memory_session_service"):
             self.set_up()
-        session = await self._init_session(
-            session_service=self._tmpl_attrs.get("in_memory_session_service"),
-            artifact_service=self._tmpl_attrs.get("in_memory_artifact_service"),
-            request=request,
-        )
+        session_service = self._tmpl_attrs.get("in_memory_session_service")
+        artifact_service = self._tmpl_attrs.get("in_memory_artifact_service")
+        # Try to get the session; if it doesn't exist, create a new one.
+        session = None
+        if request.session_id:
+            try:
+                session = await session_service.get_session(
+                    app_name=self._tmpl_attrs.get("app_name"),
+                    user_id=request.user_id,
+                    session_id=request.session_id,
+                )
+            except RuntimeError:
+                pass
+        if not session:
+            # Fall back to creating a session if the session is not found.
+            session = await self._init_session(
+                session_service=session_service,
+                artifact_service=artifact_service,
+                request=request,
+            )
         if not session:
             raise RuntimeError("Session initialization failed.")
-        # Run the agent.
+        # Run the agent
         message_for_agent = types.Content(**request.message)
         try:
             async for event in self._tmpl_attrs.get("in_memory_runner").run_async(
@@ -707,15 +744,16 @@ async def streaming_agent_run_with_events(self, request_json: str):
                     user_id=request.user_id,
                     session_id=session.id,
                     events=[event],
-                    artifact_service=self._tmpl_attrs.get("in_memory_artifact_service"),
+                    artifact_service=artifact_service,
                 )
                 yield converted_event
         finally:
-            await self._tmpl_attrs.get("in_memory_session_service").delete_session(
-                app_name=self._tmpl_attrs.get("app_name"),
-                user_id=request.user_id,
-                session_id=session.id,
-            )
+            if session and not request.session_id:
+                await session_service.delete_session(
+                    app_name=self._tmpl_attrs.get("app_name"),
+                    user_id=request.user_id,
+                    session_id=session.id,
+                )
 
     async def async_get_session(
         self,
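
With this change, `streaming_agent_run_with_events` reuses a caller-supplied `session_id` when the session exists, creates one with that id otherwise, echoes the id back in each dumped response, and deletes only sessions it created itself. A hedged sketch of driving two turns over one caller-owned session; `app` is assumed to be an instance built from this template, and each yielded payload is assumed to be the dict form produced by `_StreamingRunResponse.dump()`:

    import json

    async def two_turns(app):
        shared = "my-conversation-1"  # caller-chosen session id (illustrative)
        for text in ("Hello", "Continue."):
            request = {
                "message": {"role": "user", "parts": [{"text": text}]},
                "user_id": "user-123",
                "session_id": shared,  # reused, so the template will not delete it
            }
            async for payload in app.streaming_agent_run_with_events(
                json.dumps(request)
            ):
                # Responses now carry the session id of the run.
                print(payload.get("session_id"), payload)

The first turn falls back to `_init_session` (no session exists yet), creating the session under the caller's id; the second turn finds it via `get_session`, and the `finally` cleanup skips both because `request.session_id` is set.
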
diff --git a/vertexai/preview/reasoning_engines/templates/adk.py b/vertexai/preview/reasoning_engines/templates/adk.py
index 451656d334..39c799a3aa 100644
--- a/vertexai/preview/reasoning_engines/templates/adk.py
+++ b/vertexai/preview/reasoning_engines/templates/adk.py
@@ -183,6 +183,9 @@ def __init__(self, **kwargs):
         self.user_id: Optional[str] = kwargs.get("user_id", _DEFAULT_USER_ID)
         # The user ID.
 
+        self.session_id: Optional[str] = kwargs.get("session_id")
+        # The session ID.
+
 
 class _StreamingRunResponse:
     """Response object for `streaming_agent_run_with_events` method.
@@ -195,6 +198,8 @@ def __init__(self, **kwargs):
         # List of generated events.
         self.artifacts: Optional[List[_Artifact]] = kwargs.get("artifacts")
         # List of artifacts belonging to the session.
+        self.session_id: Optional[str] = kwargs.get("session_id")
+        # The session ID.
 
     def dump(self) -> Dict[str, Any]:
         from vertexai.agent_engines import _utils
@@ -208,6 +213,8 @@ def dump(self) -> Dict[str, Any]:
                 result["events"].append(event_dict)
         if self.artifacts:
             result["artifacts"] = [artifact.dump() for artifact in self.artifacts]
+        if self.session_id:
+            result["session_id"] = self.session_id
         return result
@@ -383,7 +390,10 @@ async def _init_session(
                 auth = _Authorization(**auth)
                 session_state[f"temp:{auth_id}"] = auth.access_token
 
-        session_id = f"temp_session_{random.randbytes(8).hex()}"
+        if request.session_id:
+            session_id = request.session_id
+        else:
+            session_id = f"temp_session_{random.randbytes(8).hex()}"
         session = await session_service.create_session(
             app_name=self._tmpl_attrs.get("app_name"),
             user_id=request.user_id,
@@ -431,7 +441,9 @@ async def _convert_response_events(
         """Converts the events to the streaming run response object."""
         import collections
 
-        result = _StreamingRunResponse(events=events, artifacts=[])
+        result = _StreamingRunResponse(
+            events=events, artifacts=[], session_id=session_id
+        )
 
         # Save the generated artifacts into the result object.
         artifact_versions = collections.defaultdict(list)
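
In both templates, `_init_session` now honors a caller-supplied session id and the run entry points try a lookup before creating anything. The same logic, distilled into a standalone helper for clarity; this is an illustrative sketch rather than SDK API, and `session_service` stands for any ADK session service with `get_session`/`create_session`:

    import random

    async def get_or_create_session(session_service, app_name, user_id, session_id=None):
        """Return an existing session if `session_id` resolves, else create one."""
        session = None
        if session_id:
            try:
                session = await session_service.get_session(
                    app_name=app_name, user_id=user_id, session_id=session_id
                )
            except RuntimeError:
                pass  # treat lookup failures as "not found"; create below
        if not session:
            session = await session_service.create_session(
                app_name=app_name,
                user_id=user_id,
                # Mirror the templates' temp_session_* naming for ephemeral ids.
                session_id=session_id or f"temp_session_{random.randbytes(8).hex()}",
            )
        return session
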
@@ -512,21 +524,33 @@ def set_up(self):
         if session_service_builder:
             self._tmpl_attrs["session_service"] = session_service_builder()
         elif "GOOGLE_CLOUD_AGENT_ENGINE_ID" in os.environ:
-            from google.adk.sessions.vertex_ai_session_service import (
-                VertexAiSessionService,
-            )
+            try:
+                from google.adk.sessions.vertex_ai_session_service import (
+                    VertexAiSessionService,
+                )
 
-            if is_version_sufficient("1.5.0"):
-                self._tmpl_attrs["session_service"] = VertexAiSessionService(
-                    project=project,
-                    location=location,
-                    agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
+                if is_version_sufficient("1.5.0"):
+                    self._tmpl_attrs["session_service"] = VertexAiSessionService(
+                        project=project,
+                        location=location,
+                        agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
+                    )
+                else:
+                    self._tmpl_attrs["session_service"] = VertexAiSessionService(
+                        project=project,
+                        location=location,
+                    )
+            except ImportError:
+                from google.adk.sessions.vertex_ai_session_service_g3 import (
+                    VertexAiSessionService,
                 )
-            else:
+
                 self._tmpl_attrs["session_service"] = VertexAiSessionService(
                     project=project,
                     location=location,
+                    agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
                 )
+
         else:
             self._tmpl_attrs["session_service"] = InMemorySessionService()
@@ -730,21 +754,33 @@ async def _invoke_agent_async():
             request = _StreamRunRequest(**json.loads(request_json))
             if not self._tmpl_attrs.get("in_memory_runner"):
                 self.set_up()
-            if not self._tmpl_attrs.get("artifact_service"):
-                self.set_up()
             # Prepare the in-memory session.
             if not self._tmpl_attrs.get("in_memory_artifact_service"):
                 self.set_up()
             if not self._tmpl_attrs.get("in_memory_session_service"):
                 self.set_up()
-            session = await self._init_session(
-                session_service=self._tmpl_attrs.get("in_memory_session_service"),
-                artifact_service=self._tmpl_attrs.get("in_memory_artifact_service"),
-                request=request,
-            )
+            session_service = self._tmpl_attrs.get("in_memory_session_service")
+            artifact_service = self._tmpl_attrs.get("in_memory_artifact_service")
+            # Try to get the session; if it doesn't exist, create a new one.
+            session = None
+            if request.session_id:
+                try:
+                    session = await session_service.get_session(
+                        app_name=self._tmpl_attrs.get("app_name"),
+                        user_id=request.user_id,
+                        session_id=request.session_id,
+                    )
+                except RuntimeError:
+                    pass
+            if not session:
+                # Fall back to creating a session if the session is not found.
+                session = await self._init_session(
+                    session_service=session_service,
+                    artifact_service=artifact_service,
+                    request=request,
+                )
             if not session:
                 raise RuntimeError("Session initialization failed.")
-            # Run the agent.
             message_for_agent = types.Content(**request.message)
             try:
@@ -757,17 +793,16 @@ async def _invoke_agent_async():
                         user_id=request.user_id,
                         session_id=session.id,
                         events=[event],
-                        artifact_service=self._tmpl_attrs.get(
-                            "in_memory_artifact_service"
-                        ),
+                        artifact_service=artifact_service,
                     )
                     event_queue.put(converted_event)
             finally:
-                await self._tmpl_attrs.get("in_memory_session_service").delete_session(
-                    app_name=self._tmpl_attrs.get("app_name"),
-                    user_id=request.user_id,
-                    session_id=session.id,
-                )
+                if session and not request.session_id:
+                    await session_service.delete_session(
+                        app_name=self._tmpl_attrs.get("app_name"),
+                        user_id=request.user_id,
+                        session_id=session.id,
+                    )
 
         def _asyncio_thread_main():
             try:
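
Both `set_up` implementations above replace a bare import of `VertexAiSessionService` with a try/except so that environments shipping the service under the `_g3` module path still resolve it. The pattern, distilled; the project and location values are illustrative, and whether the `agent_engine_id` kwarg applies depends on the installed ADK version (the preview template gates it on `is_version_sufficient("1.5.0")`):

    import os

    try:
        from google.adk.sessions.vertex_ai_session_service import (
            VertexAiSessionService,
        )
    except ImportError:
        # Fallback module path used by some ADK builds, per this diff.
        from google.adk.sessions.vertex_ai_session_service_g3 import (
            VertexAiSessionService,
        )

    session_service = VertexAiSessionService(
        project="my-project",    # illustrative
        location="us-central1",  # illustrative
        agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"),
    )
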