From 0c932b99bafde099734cf136828583de23fbaeb6 Mon Sep 17 00:00:00 2001
From: A Vertex SDK engineer
Date: Mon, 6 Oct 2025 08:50:33 -0700
Subject: [PATCH 01/11] feat: GenAI Client(evals) - Add `show` method for
 EvaluationRun class in Vertex AI GenAI SDK evals

PiperOrigin-RevId: 815745954
---
 .../genai/replays/test_get_evaluation_run.py | 33 ++++++++++++++++
 vertexai/_genai/_evals_visualization.py      | 39 +++++++++++++++++++
 vertexai/_genai/types.py                     | 14 +++++++
 3 files changed, 86 insertions(+)

diff --git a/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py b/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py
index e4e28af4f0..d28cca7a3e 100644
--- a/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py
+++ b/tests/unit/vertexai/genai/replays/test_get_evaluation_run.py
@@ -16,6 +16,7 @@
 from tests.unit.vertexai.genai.replays import pytest_helper
 from vertexai import types
+from vertexai._genai import _evals_visualization
 import datetime
 import pytest

@@ -188,6 +189,38 @@ def check_run_1957799200510967808(
         )
     )
     assert evaluation_run.error is None
+    eval_result = _evals_visualization._get_eval_result_from_eval_run(
+        evaluation_run.evaluation_results
+    )
+    assert isinstance(eval_result, types.EvaluationResult)
+    assert eval_result.summary_metrics == [
+        types.AggregatedMetricResult(
+            metric_name="checkpoint_1/universal",
+            mean_score=0.986633250587865,
+            stdev_score=0.0393092386127714,
+        ),
+        types.AggregatedMetricResult(
+            metric_name="checkpoint_2/universal",
+            mean_score=0.9438178790243048,
+            stdev_score=0.07597187617837561,
+        ),
+        types.AggregatedMetricResult(
+            metric_name="gemini-2.0-flash-001@default/universal",
+            mean_score=0.6943817985685249,
+            stdev_score=0.17738341388587855,
+        ),
+        types.AggregatedMetricResult(
+            metric_name="checkpoint_1/user_defined", mean_score=5, stdev_score=0
+        ),
+        types.AggregatedMetricResult(
+            metric_name="checkpoint_2/user_defined", mean_score=5, stdev_score=0
+        ),
+        types.AggregatedMetricResult(
+            metric_name="gemini-2.0-flash-001@default/user_defined",
+            mean_score=4.736842105263158,
+            stdev_score=0.6359497880839245,
+        ),
+    ]


 pytestmark = pytest_helper.setup(
diff --git a/vertexai/_genai/_evals_visualization.py b/vertexai/_genai/_evals_visualization.py
index 477b6b40ee..aa2a41c6f3 100644
--- a/vertexai/_genai/_evals_visualization.py
+++ b/vertexai/_genai/_evals_visualization.py
@@ -727,3 +727,42 @@ def display_evaluation_dataset(eval_dataset_obj: types.EvaluationDataset) -> Non
     dataframe_json_string = json.dumps(processed_rows, ensure_ascii=False, default=str)
     html_content = _get_inference_html(dataframe_json_string)
     display.display(display.HTML(html_content))
+
+
+def _get_eval_result_from_eval_run(
+    results: types.EvaluationRunResults,
+) -> types.EvaluationResult:
+    """Converts EvaluationRunResults into an EvaluationResult."""
+    if (
+        not results
+        or not results.summary_metrics
+        or not results.summary_metrics.metrics
+    ):
+        return types.EvaluationResult()
+
+    aggregated_metrics_dict = {}
+    for name, value in results.summary_metrics.metrics.items():
+        result = name.rsplit("/", 1)
+        full_metric_name = result[0]
+        aggregated_metric_name = result[1]
+        if full_metric_name not in aggregated_metrics_dict:
+            aggregated_metrics_dict[full_metric_name] = {}
+        aggregated_metrics_dict[full_metric_name]["sub_metric_name"] = (
+            full_metric_name.split("/")[-1]
+        )
+        aggregated_metrics_dict[full_metric_name][aggregated_metric_name] = value
+
+    items_sorted = sorted(
+        aggregated_metrics_dict.items(),
+        key=lambda item:
(item[1]["sub_metric_name"], item[0]), + ) + + aggregated_metrics = [ + types.AggregatedMetricResult( + metric_name=name, + mean_score=values.get("AVERAGE"), + stdev_score=values.get("STANDARD_DEVIATION"), + ) + for name, values in items_sorted + ] + return types.EvaluationResult(summary_metrics=aggregated_metrics) diff --git a/vertexai/_genai/types.py b/vertexai/_genai/types.py index 5e3aaa2e90..3a5b8cab75 100644 --- a/vertexai/_genai/types.py +++ b/vertexai/_genai/types.py @@ -578,6 +578,20 @@ class EvaluationRun(_common.BaseModel): default=None, description="""The results for the evaluation run.""" ) + def show(self) -> None: + """Shows the evaluation result.""" + from . import _evals_visualization + + if self.state == "SUCCEEDED": + eval_result = _evals_visualization._get_eval_result_from_eval_run( + self.evaluation_results + ) + _evals_visualization.display_evaluation_result(eval_result, None) + else: + logger.warning(f"Evaluation Run state: {self.state}.") + if self.error: + logger.warning(f"Evaluation Run error: {self.error.message}") + class EvaluationRunDict(TypedDict, total=False): """Represents an evaluation run.""" From 77578867ff7c4e8a9c4618481821cded32b4b135 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Mon, 6 Oct 2025 11:09:32 -0700 Subject: [PATCH 02/11] feat: GenAI Client(evals) - Add `get_evaluation_set` and `get_evaluation_item` methods to Vertex AI GenAI SDK evals PiperOrigin-RevId: 815805880 --- tests/unit/vertexai/genai/replays/conftest.py | 24 +- .../genai/replays/test_get_evaluation_item.py | 146 ++++++ .../genai/replays/test_get_evaluation_set.py | 89 ++++ vertexai/_genai/_evals_common.py | 34 ++ vertexai/_genai/evals.py | 382 +++++++++++++++ vertexai/_genai/types.py | 458 ++++++++++++++++++ 6 files changed, 1126 insertions(+), 7 deletions(-) create mode 100644 tests/unit/vertexai/genai/replays/test_get_evaluation_item.py create mode 100644 tests/unit/vertexai/genai/replays/test_get_evaluation_set.py diff --git a/tests/unit/vertexai/genai/replays/conftest.py b/tests/unit/vertexai/genai/replays/conftest.py index f1c963a3ca..0cd57473a0 100644 --- a/tests/unit/vertexai/genai/replays/conftest.py +++ b/tests/unit/vertexai/genai/replays/conftest.py @@ -131,29 +131,39 @@ def _get_replay_id(use_vertex: bool, replays_prefix: str) -> str: EVAL_CONFIG_GCS_URI = ( "gs://vertex-ai-generative-ai-eval-sdk-resources/metrics/text_quality/v1.0.0.yaml" ) +EVAL_ITEM_REQUEST_GCS_URI = ( + "gs://lakeyk-limited-bucket/agora_eval_080525/request_4813679498589372416.json" +) +EVAL_ITEM_RESULT_GCS_URI = ( + "gs://lakeyk-limited-bucket/agora_eval_080525/result_1486082323915997184.json" +) +EVAL_GCS_URI_ITEMS = { + EVAL_CONFIG_GCS_URI: "test_resources/mock_eval_config.yaml", + EVAL_ITEM_REQUEST_GCS_URI: "test_resources/request_4813679498589372416.json", + EVAL_ITEM_RESULT_GCS_URI: "test_resources/result_1486082323915997184.json", +} def _mock_read_file_contents_side_effect(uri: str): """ Side effect to mock GcsUtils.read_file_contents for eval test test_batch_evaluate. """ - if uri == EVAL_CONFIG_GCS_URI: + if uri in EVAL_GCS_URI_ITEMS: # Construct the absolute path to the local mock file. 
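+        # EVAL_GCS_URI_ITEMS maps each mocked GCS URI to its local fixture, so this
+        # one side effect can serve the eval config, request, and result payloads.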
current_dir = os.path.dirname(__file__) - local_yaml_path = os.path.join( - current_dir, "test_resources/mock_eval_config.yaml" - ) + local_mock_file_path = os.path.join(current_dir, EVAL_GCS_URI_ITEMS[uri]) try: - with open(local_yaml_path, "r") as f: + with open(local_mock_file_path, "r") as f: return f.read() except FileNotFoundError: raise FileNotFoundError( - "The mock data file 'mock_eval_config.yaml' was not found." + f"The mock data file '{EVAL_GCS_URI_ITEMS[uri]}' was not found." ) raise ValueError( f"Unexpected GCS URI '{uri}' in replay test. Only " - f"'{EVAL_CONFIG_GCS_URI}' is mocked." + f"'{EVAL_CONFIG_GCS_URI}', '{EVAL_ITEM_REQUEST_GCS_URI}', and " + f"'{EVAL_ITEM_RESULT_GCS_URI}' are mocked." ) diff --git a/tests/unit/vertexai/genai/replays/test_get_evaluation_item.py b/tests/unit/vertexai/genai/replays/test_get_evaluation_item.py new file mode 100644 index 0000000000..687f5a3a16 --- /dev/null +++ b/tests/unit/vertexai/genai/replays/test_get_evaluation_item.py @@ -0,0 +1,146 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# pylint: disable=protected-access,bad-continuation,missing-function-docstring + +from tests.unit.vertexai.genai.replays import pytest_helper +from vertexai import types +import datetime +import pytest + + +def test_get_eval_item_response(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem.""" + evaluation_item_name = "projects/503583131166/locations/us-central1/evaluationItems/1486082323915997184" + evaluation_item = client.evals.get_evaluation_item(name=evaluation_item_name) + assert isinstance(evaluation_item, types.EvaluationItem) + check_item_1486082323915997184(evaluation_item, evaluation_item_name) + + +def test_get_eval_item_request(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem with request.""" + evaluation_item_name = "projects/503583131166/locations/us-central1/evaluationItems/4813679498589372416" + evaluation_item = client.evals.get_evaluation_item(name=evaluation_item_name) + assert isinstance(evaluation_item, types.EvaluationItem) + check_item_4813679498589372416(evaluation_item, evaluation_item_name) + + +pytest_plugins = ("pytest_asyncio",) + + +@pytest.mark.asyncio +async def test_get_eval_item_response_async(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem.""" + eval_item_id = "1486082323915997184" + evaluation_item_name = ( + f"projects/503583131166/locations/us-central1/evaluationItems/{eval_item_id}" + ) + evaluation_item = await client.aio.evals.get_evaluation_item(name=eval_item_id) + check_item_1486082323915997184(evaluation_item, evaluation_item_name) + + +@pytest.mark.asyncio +async def test_get_eval_item_request_async(client): + """Tests that get_evaluation_item() returns a correctly structured EvaluationItem with request.""" + eval_item_id = "4813679498589372416" + evaluation_item_name = ( + f"projects/503583131166/locations/us-central1/evaluationItems/{eval_item_id}" 
+ ) + evaluation_item = await client.aio.evals.get_evaluation_item(name=eval_item_id) + check_item_4813679498589372416(evaluation_item, evaluation_item_name) + + +def check_item_1486082323915997184( + evaluation_item: types.EvaluationItem, evaluation_item_name: str +): + assert evaluation_item.name == evaluation_item_name + assert evaluation_item.display_name == "universal result for 7119522507803066368" + assert evaluation_item.evaluation_item_type == types.EvaluationItemType.RESULT + assert ( + evaluation_item.gcs_uri + == "gs://lakeyk-limited-bucket/agora_eval_080525/result_1486082323915997184.json" + ) + assert evaluation_item.create_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 713792, tzinfo=datetime.timezone.utc + ) + assert isinstance(evaluation_item.evaluation_response, types.EvaluationItemResult) + assert ( + evaluation_item.evaluation_response.evaluation_request + == "projects/503583131166/locations/us-central1/evaluationItems/7119522507803066368" + ) + assert ( + evaluation_item.evaluation_response.evaluation_run + == "projects/503583131166/locations/us-central1/evaluationRuns/1957799200510967808" + ) + # Check the first candidate result. + candidate_result = evaluation_item.evaluation_response.candidate_results[0] + assert candidate_result.candidate == "gemini-2.0-flash-001@default" + assert candidate_result.metric == "universal" + assert candidate_result.score == 0.2857143 + # Check the first rubric verdict. + rubric_verdict = candidate_result.rubric_verdicts[0] + assert rubric_verdict.verdict + assert ( + rubric_verdict.reasoning + == "The entire response is written in the English language." + ) + assert rubric_verdict.evaluated_rubric.type == "LANGUAGE:PRIMARY_RESPONSE_LANGUAGE" + assert rubric_verdict.evaluated_rubric.importance == "HIGH" + assert ( + rubric_verdict.evaluated_rubric.content.property.description + == "The response is in English." + ) + # Check the request. + request = evaluation_item.evaluation_response.request + assert ( + "There is a wide range of potato varieties to choose from" + in request.prompt.text + ) + assert request.candidate_responses[0].candidate == "gemini-2.0-flash-001@default" + assert "Pick out your potato variety" in request.candidate_responses[0].text + + +def check_item_4813679498589372416( + evaluation_item: types.EvaluationItem, evaluation_item_name: str +): + assert evaluation_item.name == evaluation_item_name + assert evaluation_item.display_name == "4813679498589372416" + assert evaluation_item.evaluation_item_type == types.EvaluationItemType.REQUEST + assert ( + evaluation_item.gcs_uri + == "gs://lakeyk-limited-bucket/agora_eval_080525/request_4813679498589372416.json" + ) + assert evaluation_item.create_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 338353, tzinfo=datetime.timezone.utc + ) + assert isinstance(evaluation_item.evaluation_request, types.EvaluationItemRequest) + # Check the request. + request = evaluation_item.evaluation_request + assert ( + "If your ball is curving during flight from left to right" + in request.prompt.text + ) + # Check the first candidate response. 
+ assert request.candidate_responses[0].candidate == "gemini-2.0-flash-001@default" + assert ( + "Keep your knees bent during the backswing" + in request.candidate_responses[0].text + ) + + +pytestmark = pytest_helper.setup( + file=__file__, + globals_for_file=globals(), + test_method="evals.get_evaluation_item", +) diff --git a/tests/unit/vertexai/genai/replays/test_get_evaluation_set.py b/tests/unit/vertexai/genai/replays/test_get_evaluation_set.py new file mode 100644 index 0000000000..37b99f1486 --- /dev/null +++ b/tests/unit/vertexai/genai/replays/test_get_evaluation_set.py @@ -0,0 +1,89 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# pylint: disable=protected-access,bad-continuation,missing-function-docstring + +from tests.unit.vertexai.genai.replays import pytest_helper +from vertexai import types +import datetime +import pytest + + +def test_get_eval_set(client): + """Tests that get_evaluation_set() returns a correctly structured EvaluationSet.""" + evaluation_set_name = ( + "projects/503583131166/locations/us-central1/evaluationSets/102386522778501120" + ) + evaluation_set = client.evals.get_evaluation_set(name=evaluation_set_name) + assert isinstance(evaluation_set, types.EvaluationSet) + check_set_102386522778501120(evaluation_set, evaluation_set_name) + + +pytest_plugins = ("pytest_asyncio",) + + +@pytest.mark.asyncio +async def test_get_eval_set_async(client): + """Tests that get_evaluation_set() returns a correctly structured EvaluationSet.""" + eval_set_id = "102386522778501120" + evaluation_set_name = ( + f"projects/503583131166/locations/us-central1/evaluationSets/{eval_set_id}" + ) + evaluation_set = await client.aio.evals.get_evaluation_set(name=eval_set_id) + check_set_102386522778501120(evaluation_set, evaluation_set_name) + + +def check_set_102386522778501120( + evaluation_set: types.EvaluationSet, evaluation_set_name: str +): + assert evaluation_set.name == evaluation_set_name + assert ( + evaluation_set.display_name + == "Results Set for EvaluationRun 1957799200510967808" + ) + assert evaluation_set.evaluation_items == [ + "projects/503583131166/locations/us-central1/evaluationItems/2748216119486578688", + "projects/503583131166/locations/us-central1/evaluationItems/1486082323915997184", + "projects/503583131166/locations/us-central1/evaluationItems/2219043163270545408", + "projects/503583131166/locations/us-central1/evaluationItems/8570244537769787392", + "projects/503583131166/locations/us-central1/evaluationItems/2112082672120496128", + "projects/503583131166/locations/us-central1/evaluationItems/8192505119024087040", + "projects/503583131166/locations/us-central1/evaluationItems/1383625432393318400", + "projects/503583131166/locations/us-central1/evaluationItems/5832267070561058816", + "projects/503583131166/locations/us-central1/evaluationItems/1733991409653907456", + "projects/503583131166/locations/us-central1/evaluationItems/2549142942207967232", + 
"projects/503583131166/locations/us-central1/evaluationItems/8565740938142416896", + "projects/503583131166/locations/us-central1/evaluationItems/6069620844672319488", + "projects/503583131166/locations/us-central1/evaluationItems/7777822109585113088", + "projects/503583131166/locations/us-central1/evaluationItems/5656415578861076480", + "projects/503583131166/locations/us-central1/evaluationItems/5926842662735839232", + "projects/503583131166/locations/us-central1/evaluationItems/648623899457617920", + "projects/503583131166/locations/us-central1/evaluationItems/4349245787016790016", + "projects/503583131166/locations/us-central1/evaluationItems/1119038954285301760", + "projects/503583131166/locations/us-central1/evaluationItems/5741983971781115904", + ] + assert evaluation_set.create_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 413954, tzinfo=datetime.timezone.utc + ) + assert evaluation_set.update_time == datetime.datetime( + 2025, 9, 8, 20, 55, 46, 413954, tzinfo=datetime.timezone.utc + ) + assert evaluation_set.metadata is None + + +pytestmark = pytest_helper.setup( + file=__file__, + globals_for_file=globals(), + test_method="evals.get_evaluation_set", +) diff --git a/vertexai/_genai/_evals_common.py b/vertexai/_genai/_evals_common.py index 7a998db1e2..8e51c02a40 100644 --- a/vertexai/_genai/_evals_common.py +++ b/vertexai/_genai/_evals_common.py @@ -975,3 +975,37 @@ def _execute_evaluation( "Evaluation results uploaded successfully to GCS: %s", uploaded_path ) return evaluation_result + + +def _convert_gcs_to_evaluation_item_result( + api_client: BaseApiClient, + gcs_uri: str, +) -> types.EvaluationItemResult: + """Converts a json file to an EvaluationItemResult.""" + logger.info("Loading evaluation item result from GCS: %s", gcs_uri) + gcs_utils = _evals_utils.GcsUtils(api_client=api_client) + try: + eval_item_data = json.loads(gcs_utils.read_file_contents(gcs_uri)) + return types.EvaluationItemResult(**eval_item_data) + except Exception as e: + logger.error( + "Failed to load evaluation result from GCS: %s. Error: %s", gcs_uri, e + ) + return types.EvaluationItemResult() + + +def _convert_gcs_to_evaluation_item_request( + api_client: BaseApiClient, + gcs_uri: str, +) -> types.EvaluationItemRequest: + """Converts a json file to an EvaluationItemRequest.""" + logger.info("Loading evaluation item request from GCS: %s", gcs_uri) + gcs_utils = _evals_utils.GcsUtils(api_client=api_client) + try: + eval_item_data = json.loads(gcs_utils.read_file_contents(gcs_uri)) + return types.EvaluationItemRequest(**eval_item_data) + except Exception as e: + logger.error( + "Failed to load evaluation request from GCS: %s. 
Error: %s", gcs_uri, e + ) + return types.EvaluationItemRequest() diff --git a/vertexai/_genai/evals.py b/vertexai/_genai/evals.py index 5e792d1936..78ddccd49f 100644 --- a/vertexai/_genai/evals.py +++ b/vertexai/_genai/evals.py @@ -173,6 +173,20 @@ def _GenerateInstanceRubricsRequest_to_vertex( return to_object +def _GetEvaluationItemParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + def _GetEvaluationRunParameters_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -187,6 +201,20 @@ def _GetEvaluationRunParameters_to_vertex( return to_object +def _GetEvaluationSetParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv(to_object, ["config"], getv(from_object, ["config"])) + + return to_object + + def _RubricBasedMetricInput_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -527,6 +555,106 @@ def _get_evaluation_run( self._api_client._verify_response(return_value) return return_value + def _get_evaluation_set( + self, *, name: str, config: Optional[types.GetEvaluationSetConfigOrDict] = None + ) -> types.EvaluationSet: + """ + Retrieves an EvaluationSet from the resource name. + """ + + parameter_model = types._GetEvaluationSetParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationSetParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationSets/{name}".format_map(request_url_dict) + else: + path = "evaluationSets/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("get", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationSet._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + def _get_evaluation_item( + self, *, name: str, config: Optional[types.GetEvaluationItemConfigOrDict] = None + ) -> types.EvaluationItem: + """ + Retrieves an EvaluationItem from the resource name. 
+ """ + + parameter_model = types._GetEvaluationItemParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationItemParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationItems/{name}".format_map(request_url_dict) + else: + path = "evaluationItems/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("get", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationItem._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + def run(self) -> types.EvaluateInstancesResponse: """Evaluates an instance of a model. @@ -955,6 +1083,80 @@ def create_evaluation_run( config=config, ) + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_set method is experimental, " + "and may change in future versions." + ) + def get_evaluation_set( + self, + *, + name: str, + config: Optional[types.GetEvaluationSetConfigOrDict] = None, + ) -> types.EvaluationSet: + """Retrieves an EvaluationSet from the resource name. + + Args: + name: The resource name of the EvaluationSet. Format: + `projects/{project}/locations/{location}/evaluationSets/{evaluation_set}` + config: The optional configuration for the evaluation set. Must be a dict or + `types.GetEvaluationSetConfigOrDict` type. + + Returns: + The evaluation set. + """ + + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + return self._get_evaluation_set(name=name, config=config) + + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_item method is experimental, " + "and may change in future versions." + ) + def get_evaluation_item( + self, + *, + name: str, + config: Optional[types.GetEvaluationItemConfigOrDict] = None, + ) -> types.EvaluationItem: + """Retrieves an EvaluationItem from the resource name. + + Args: + name: The resource name of the EvaluationItem. Format: + `projects/{project}/locations/{location}/evaluationItems/{evaluation_item}` + config: The optional configuration for the evaluation item. Must be a dict or + `types.GetEvaluationItemConfigOrDict` type. + + Returns: + The evaluation item. 
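+
+        Example (illustrative; the project, location, and item ID below are
+        placeholders):
+
+            item = client.evals.get_evaluation_item(
+                name="projects/my-project/locations/us-central1/evaluationItems/123"
+            )
+            if item.evaluation_item_type == types.EvaluationItemType.RESULT:
+                print(item.evaluation_response)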
+ """ + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + result = self._get_evaluation_item(name=name, config=config) + if ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.RESULT + ): + result.evaluation_response = ( + _evals_common._convert_gcs_to_evaluation_item_result( + self._api_client, result.gcs_uri + ) + ) + elif ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.REQUEST + ): + result.evaluation_request = ( + _evals_common._convert_gcs_to_evaluation_item_request( + self._api_client, result.gcs_uri + ) + ) + return result + class AsyncEvals(_api_module.BaseModule): @@ -1217,6 +1419,110 @@ async def _get_evaluation_run( self._api_client._verify_response(return_value) return return_value + async def _get_evaluation_set( + self, *, name: str, config: Optional[types.GetEvaluationSetConfigOrDict] = None + ) -> types.EvaluationSet: + """ + Retrieves an EvaluationSet from the resource name. + """ + + parameter_model = types._GetEvaluationSetParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationSetParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationSets/{name}".format_map(request_url_dict) + else: + path = "evaluationSets/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. + request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationSet._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + async def _get_evaluation_item( + self, *, name: str, config: Optional[types.GetEvaluationItemConfigOrDict] = None + ) -> types.EvaluationItem: + """ + Retrieves an EvaluationItem from the resource name. + """ + + parameter_model = types._GetEvaluationItemParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _GetEvaluationItemParameters_to_vertex(parameter_model) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "evaluationItems/{name}".format_map(request_url_dict) + else: + path = "evaluationItems/{name}" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
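+        # `config` carries only client-side options; `http_options` is read from
+        # the parameter model below, so nothing from it belongs in the request body.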
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "get", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.EvaluationItem._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + async def batch_evaluate( self, *, @@ -1355,3 +1661,79 @@ async def create_evaluation_run( ) return result + + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_set method is experimental, " + "and may change in future versions." + ) + async def get_evaluation_set( + self, + *, + name: str, + config: Optional[types.GetEvaluationSetConfigOrDict] = None, + ) -> types.EvaluationSet: + """Retrieves an EvaluationSet from the resource name. + + Args: + name: The resource name of the EvaluationSet. Format: + `projects/{project}/locations/{location}/evaluationSets/{evaluation_set}` + config: The optional configuration for the evaluation set. Must be a dict or + `types.GetEvaluationSetConfigOrDict` type. + + Returns: + The evaluation set. + """ + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + result = await self._get_evaluation_set(name=name, config=config) + + return result + + @_common.experimental_warning( + "The Vertex SDK GenAI evals.get_evaluation_item method is experimental, " + "and may change in future versions." + ) + async def get_evaluation_item( + self, + *, + name: str, + config: Optional[types.GetEvaluationItemConfigOrDict] = None, + ) -> types.EvaluationItem: + """Retrieves an EvaluationItem from the resource name. + + Args: + name: The resource name of the EvaluationItem. Format: + `projects/{project}/locations/{location}/evaluationItems/{evaluation_item}` + config: The optional configuration for the evaluation item. Must be a dict or + `types.GetEvaluationItemConfigOrDict` type. + + Returns: + The evaluation item. + """ + if not name: + raise ValueError("name cannot be empty.") + if name.startswith("projects/"): + name = name.split("/")[-1] + result = await self._get_evaluation_item(name=name, config=config) + if ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.RESULT + ): + result.evaluation_response = ( + _evals_common._convert_gcs_to_evaluation_item_result( + self._api_client, result.gcs_uri + ) + ) + elif ( + result.gcs_uri + and result.evaluation_item_type == types.EvaluationItemType.REQUEST + ): + result.evaluation_request = ( + _evals_common._convert_gcs_to_evaluation_item_request( + self._api_client, result.gcs_uri + ) + ) + + return result diff --git a/vertexai/_genai/types.py b/vertexai/_genai/types.py index 3a5b8cab75..fda95b4213 100644 --- a/vertexai/_genai/types.py +++ b/vertexai/_genai/types.py @@ -333,6 +333,17 @@ class Importance(_common.CaseInSensitiveEnum): """Low importance.""" +class EvaluationItemType(_common.CaseInSensitiveEnum): + """The type of the EvaluationItem.""" + + EVALUATION_ITEM_TYPE_UNSPECIFIED = "EVALUATION_ITEM_TYPE_UNSPECIFIED" + """The default value. 
This value is unused.""" + REQUEST = "REQUEST" + """The EvaluationItem is a request to evaluate.""" + RESULT = "RESULT" + """The EvaluationItem is the result of evaluation.""" + + class GenerateMemoriesResponseGeneratedMemoryAction(_common.CaseInSensitiveEnum): """The action to take.""" @@ -2709,6 +2720,453 @@ class _GetEvaluationRunParametersDict(TypedDict, total=False): ] +class GetEvaluationSetConfig(_common.BaseModel): + """Config for get evaluation set.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetEvaluationSetConfigDict(TypedDict, total=False): + """Config for get evaluation set.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + +GetEvaluationSetConfigOrDict = Union[GetEvaluationSetConfig, GetEvaluationSetConfigDict] + + +class _GetEvaluationSetParameters(_common.BaseModel): + """Represents a job that gets an evaluation set.""" + + name: Optional[str] = Field(default=None, description="""""") + config: Optional[GetEvaluationSetConfig] = Field(default=None, description="""""") + + +class _GetEvaluationSetParametersDict(TypedDict, total=False): + """Represents a job that gets an evaluation set.""" + + name: Optional[str] + """""" + + config: Optional[GetEvaluationSetConfigDict] + """""" + + +_GetEvaluationSetParametersOrDict = Union[ + _GetEvaluationSetParameters, _GetEvaluationSetParametersDict +] + + +class EvaluationSet(_common.BaseModel): + """Represents an evaluation set.""" + + name: Optional[str] = Field( + default=None, description="""The resource name of the evaluation set.""" + ) + display_name: Optional[str] = Field( + default=None, description="""The display name of the evaluation set.""" + ) + evaluation_items: Optional[list[str]] = Field( + default=None, + description="""The EvaluationItems that are part of this dataset.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, description="""The create time of the evaluation set.""" + ) + update_time: Optional[datetime.datetime] = Field( + default=None, description="""The update time of the evaluation set.""" + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, description="""The metadata of the evaluation set.""" + ) + + +class EvaluationSetDict(TypedDict, total=False): + """Represents an evaluation set.""" + + name: Optional[str] + """The resource name of the evaluation set.""" + + display_name: Optional[str] + """The display name of the evaluation set.""" + + evaluation_items: Optional[list[str]] + """The EvaluationItems that are part of this dataset.""" + + create_time: Optional[datetime.datetime] + """The create time of the evaluation set.""" + + update_time: Optional[datetime.datetime] + """The update time of the evaluation set.""" + + metadata: Optional[dict[str, Any]] + """The metadata of the evaluation set.""" + + +EvaluationSetOrDict = Union[EvaluationSet, EvaluationSetDict] + + +class GetEvaluationItemConfig(_common.BaseModel): + """Config for get evaluation item.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + + +class GetEvaluationItemConfigDict(TypedDict, total=False): + """Config for get evaluation item.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + +GetEvaluationItemConfigOrDict = Union[ + GetEvaluationItemConfig, GetEvaluationItemConfigDict +] + + 
+class _GetEvaluationItemParameters(_common.BaseModel): + """Represents a job that gets an evaluation item.""" + + name: Optional[str] = Field(default=None, description="""""") + config: Optional[GetEvaluationItemConfig] = Field(default=None, description="""""") + + +class _GetEvaluationItemParametersDict(TypedDict, total=False): + """Represents a job that gets an evaluation item.""" + + name: Optional[str] + """""" + + config: Optional[GetEvaluationItemConfigDict] + """""" + + +_GetEvaluationItemParametersOrDict = Union[ + _GetEvaluationItemParameters, _GetEvaluationItemParametersDict +] + + +class PromptTemplateData(_common.BaseModel): + """Message to hold a prompt template and the values to populate the template.""" + + values: Optional[dict[str, genai_types.Content]] = Field( + default=None, description="""The values for fields in the prompt template.""" + ) + + +class PromptTemplateDataDict(TypedDict, total=False): + """Message to hold a prompt template and the values to populate the template.""" + + values: Optional[dict[str, genai_types.ContentDict]] + """The values for fields in the prompt template.""" + + +PromptTemplateDataOrDict = Union[PromptTemplateData, PromptTemplateDataDict] + + +class EvaluationPrompt(_common.BaseModel): + """Represents the prompt to be evaluated.""" + + text: Optional[str] = Field(default=None, description="""Text prompt.""") + value: Optional[dict[str, Any]] = Field( + default=None, + description="""Fields and values that can be used to populate the prompt template.""", + ) + prompt_template_data: Optional[PromptTemplateData] = Field( + default=None, description="""Prompt template data.""" + ) + + +class EvaluationPromptDict(TypedDict, total=False): + """Represents the prompt to be evaluated.""" + + text: Optional[str] + """Text prompt.""" + + value: Optional[dict[str, Any]] + """Fields and values that can be used to populate the prompt template.""" + + prompt_template_data: Optional[PromptTemplateDataDict] + """Prompt template data.""" + + +EvaluationPromptOrDict = Union[EvaluationPrompt, EvaluationPromptDict] + + +class CandidateResponse(_common.BaseModel): + """Responses from model or agent.""" + + candidate: Optional[str] = Field( + default=None, + description="""The name of the candidate that produced the response.""", + ) + text: Optional[str] = Field(default=None, description="""The text response.""") + value: Optional[dict[str, Any]] = Field( + default=None, + description="""Fields and values that can be used to populate the response template.""", + ) + events: Optional[list[genai_types.Content]] = Field( + default=None, + description="""Intermediate events (such as tool calls and responses) that led to the final response.""", + ) + + +class CandidateResponseDict(TypedDict, total=False): + """Responses from model or agent.""" + + candidate: Optional[str] + """The name of the candidate that produced the response.""" + + text: Optional[str] + """The text response.""" + + value: Optional[dict[str, Any]] + """Fields and values that can be used to populate the response template.""" + + events: Optional[list[genai_types.ContentDict]] + """Intermediate events (such as tool calls and responses) that led to the final response.""" + + +CandidateResponseOrDict = Union[CandidateResponse, CandidateResponseDict] + + +class EvaluationItemRequest(_common.BaseModel): + """Single evaluation request.""" + + prompt: Optional[EvaluationPrompt] = Field( + default=None, description="""The request/prompt to evaluate.""" + ) + golden_response: Optional[CandidateResponse] 
= Field( + default=None, description="""The ideal response or ground truth.""" + ) + rubrics: Optional[dict[str, "RubricGroup"]] = Field( + default=None, + description="""Named groups of rubrics associated with this prompt. The key is a user-defined name for the rubric group.""", + ) + candidate_responses: Optional[list[CandidateResponse]] = Field( + default=None, + description="""Responses from model under test and other baseline models for comparison.""", + ) + + +class EvaluationItemRequestDict(TypedDict, total=False): + """Single evaluation request.""" + + prompt: Optional[EvaluationPromptDict] + """The request/prompt to evaluate.""" + + golden_response: Optional[CandidateResponseDict] + """The ideal response or ground truth.""" + + rubrics: Optional[dict[str, "RubricGroupDict"]] + """Named groups of rubrics associated with this prompt. The key is a user-defined name for the rubric group.""" + + candidate_responses: Optional[list[CandidateResponseDict]] + """Responses from model under test and other baseline models for comparison.""" + + +EvaluationItemRequestOrDict = Union[EvaluationItemRequest, EvaluationItemRequestDict] + + +class CandidateResult(_common.BaseModel): + """Result for a single candidate.""" + + candidate: Optional[str] = Field( + default=None, + description="""The candidate that is being evaluated. The value is the same as the candidate name in the EvaluationRequest.""", + ) + metric: Optional[str] = Field( + default=None, description="""The metric that was evaluated.""" + ) + score: Optional[float] = Field( + default=None, description="""The score of the metric.""" + ) + explanation: Optional[str] = Field( + default=None, description="""The explanation for the metric.""" + ) + rubric_verdicts: Optional[list[RubricVerdict]] = Field( + default=None, description="""The rubric verdicts for the metric.""" + ) + additional_results: Optional[dict[str, Any]] = Field( + default=None, description="""Additional results for the metric.""" + ) + + +class CandidateResultDict(TypedDict, total=False): + """Result for a single candidate.""" + + candidate: Optional[str] + """The candidate that is being evaluated. 
The value is the same as the candidate name in the EvaluationRequest."""
+
+    metric: Optional[str]
+    """The metric that was evaluated."""
+
+    score: Optional[float]
+    """The score of the metric."""
+
+    explanation: Optional[str]
+    """The explanation for the metric."""
+
+    rubric_verdicts: Optional[list[RubricVerdictDict]]
+    """The rubric verdicts for the metric."""
+
+    additional_results: Optional[dict[str, Any]]
+    """Additional results for the metric."""
+
+
+CandidateResultOrDict = Union[CandidateResult, CandidateResultDict]
+
+
+class EvaluationItemResult(_common.BaseModel):
+    """Represents the result of an evaluation item."""
+
+    evaluation_request: Optional[str] = Field(
+        default=None, description="""The request item that was evaluated."""
+    )
+    evaluation_run: Optional[str] = Field(
+        default=None,
+        description="""The evaluation run that was used to generate the result.""",
+    )
+    request: Optional[EvaluationItemRequest] = Field(
+        default=None, description="""The request that was evaluated."""
+    )
+    metric: Optional[str] = Field(
+        default=None, description="""The metric that was evaluated."""
+    )
+    candidate_results: Optional[list[CandidateResult]] = Field(
+        default=None, description="""The results for the metric."""
+    )
+    metadata: Optional[dict[str, Any]] = Field(
+        default=None, description="""Metadata about the evaluation result."""
+    )
+
+
+class EvaluationItemResultDict(TypedDict, total=False):
+    """Represents the result of an evaluation item."""
+
+    evaluation_request: Optional[str]
+    """The request item that was evaluated."""
+
+    evaluation_run: Optional[str]
+    """The evaluation run that was used to generate the result."""
+
+    request: Optional[EvaluationItemRequestDict]
+    """The request that was evaluated."""
+
+    metric: Optional[str]
+    """The metric that was evaluated."""
+
+    candidate_results: Optional[list[CandidateResultDict]]
+    """The results for the metric."""
+
+    metadata: Optional[dict[str, Any]]
+    """Metadata about the evaluation result."""
+
+
+EvaluationItemResultOrDict = Union[EvaluationItemResult, EvaluationItemResultDict]
+
+
+class EvaluationItem(_common.BaseModel):
+    """EvaluationItem is a single evaluation request or result.
+
+    The content of an EvaluationItem is immutable - it cannot be updated once
+    created. EvaluationItems can be deleted when no longer needed.
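+
+    Which payload field is populated depends on `evaluation_item_type`:
+    `get_evaluation_item` loads the JSON stored at `gcs_uri` into
+    `evaluation_request` for REQUEST items and into `evaluation_response`
+    for RESULT items.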
+    """
+
+    name: Optional[str] = Field(
+        default=None, description="""The resource name of the EvaluationItem."""
+    )
+    display_name: Optional[str] = Field(
+        default=None, description="""The display name of the EvaluationItem."""
+    )
+    metadata: Optional[dict[str, Any]] = Field(
+        default=None, description="""Metadata for the EvaluationItem."""
+    )
+    labels: Optional[dict[str, str]] = Field(
+        default=None, description="""Labels for the EvaluationItem."""
+    )
+    evaluation_item_type: Optional[EvaluationItemType] = Field(
+        default=None, description="""The type of the EvaluationItem."""
+    )
+    evaluation_request: Optional[EvaluationItemRequest] = Field(
+        default=None, description="""The request to evaluate."""
+    )
+    evaluation_response: Optional[EvaluationItemResult] = Field(
+        default=None, description="""The response from evaluation."""
+    )
+    gcs_uri: Optional[str] = Field(
+        default=None,
+        description="""The Cloud Storage object where the request or response is stored.""",
+    )
+    create_time: Optional[datetime.datetime] = Field(
+        default=None, description="""Timestamp when this item was created."""
+    )
+    error: Optional[genai_types.GoogleRpcStatus] = Field(
+        default=None, description="""Error for the evaluation item."""
+    )
+
+    # TODO(b/448806531): Remove all the overridden _from_response methods once the
+    # ticket is resolved and published.
+    @classmethod
+    def _from_response(
+        cls: typing.Type["EvaluationItem"],
+        *,
+        response: dict[str, object],
+        kwargs: dict[str, object],
+    ) -> "EvaluationItem":
+        """Converts a dictionary response into an EvaluationItem object."""
+
+        response = _camel_key_to_snake(response)
+        result = super()._from_response(response=response, kwargs=kwargs)
+        return result
+
+
+class EvaluationItemDict(TypedDict, total=False):
+    """EvaluationItem is a single evaluation request or result.
+
+    The content of an EvaluationItem is immutable - it cannot be updated once
+    created. EvaluationItems can be deleted when no longer needed.
+ """ + + name: Optional[str] + """The resource name of the EvaluationItem.""" + + display_name: Optional[str] + """The display name of the EvaluationItem.""" + + metadata: Optional[dict[str, Any]] + """Metadata for the EvaluationItem.""" + + labels: Optional[dict[str, str]] + """Labels for the EvaluationItem.""" + + evaluation_item_type: Optional[EvaluationItemType] + """The type of the EvaluationItem.""" + + evaluation_request: Optional[EvaluationItemRequestDict] + """The request to evaluate.""" + + evaluation_response: Optional[EvaluationItemResultDict] + """The response from evaluation.""" + + gcs_uri: Optional[str] + """The Cloud Storage object where the request or response is stored.""" + + create_time: Optional[datetime.datetime] + """Timestamp when this item was created.""" + + error: Optional[genai_types.GoogleRpcStatusDict] + """Error for the evaluation item.""" + + +EvaluationItemOrDict = Union[EvaluationItem, EvaluationItemDict] + + class OptimizeConfig(_common.BaseModel): """Config for Prompt Optimizer.""" From 89a26c15b8a15c8698192dc283e5839729ad3e66 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 15:08:52 -0700 Subject: [PATCH 03/11] Copybara import of the project: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -- e33979564e36259589eb8349bd46bea76047781f by Owl Bot : feat:Auto-generated CL for //google/cloud/aiplatform:aiplatform_v1beta1_public_proto_gen PiperOrigin-RevId: 813083326 Source-Link: https://github.com/googleapis/googleapis/commit/3ecf1f04741305663178a74c94c4a81e8f237621 Source-Link: https://github.com/googleapis/googleapis-gen/commit/00da0dd22d00fcd1110a40b932b7243d8fdefb3c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDBkYTBkZDIyZDAwZmNkMTExMGE0MGI5MzJiNzI0M2Q4ZmRlZmIzYyJ9 -- efdd99aa47fa747444b677925bdbec79b3e364e1 by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md -- b9fd9fa0d0b2f9275eacd613d433818659de757e by Owl Bot : feat: Auto-generated CL for //google/cloud/aiplatform:aiplatform_v1_public_proto_gen PiperOrigin-RevId: 813096234 Source-Link: https://github.com/googleapis/googleapis/commit/e78280f4648373caffb79fe532866728625db2fe Source-Link: https://github.com/googleapis/googleapis-gen/commit/d83eeb4b0559ad0b3243152ec8b8c2310c23aec7 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZDgzZWViNGIwNTU5YWQwYjMyNDMxNTJlYzhiOGMyMzEwYzIzYWVjNyJ9 -- e01fdae7d702211173f8fb2760e6a91cf08ed953 by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md -- 5f2b22954c76980c94440652a59f80909af31c7f by Owl Bot : feat: add DeploymentTier enum to DeployedIndex PiperOrigin-RevId: 813384393 Source-Link: https://github.com/googleapis/googleapis/commit/063f9e19c5890182920980ced75828fd7c0588a5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/1119646f9e51922fbca8c3a7ae743a6c79131c1e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTExOTY0NmY5ZTUxOTIyZmJjYThjM2E3YWU3NDNhNmM3OTEzMWMxZSJ9 -- b9d3464eaaa43dce2b5f383e20cdaf07758620a5 by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md -- fabb82df6df4eeb0da40d99ba5d87bd3d36ece14 by Owl Bot : feat: Add labels field for Predict API for Imagen use case (v1beta) PiperOrigin-RevId: 
815803050
Source-Link: https://github.com/googleapis/googleapis/commit/7f0c1e5d7a45598b92d2f08e02000f005b757194
Source-Link: https://github.com/googleapis/googleapis-gen/commit/a452a5dda8e72c32d60b01e751bd0cbdf6b3a26e
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTQ1MmE1ZGRhOGU3MmMzMmQ2MGIwMWU3NTFiZDBjYmRmNmIzYTI2ZSJ9

--
8cf83c2266d30a39857c197ddc363aec3c0d6836 by Owl Bot:

🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

COPYBARA_INTEGRATE_REVIEW=https://github.com/googleapis/python-aiplatform/pull/5863 from googleapis:owl-bot-copy a96259558de173d03da0dfa7c72dc6d6b9f4daf3
PiperOrigin-RevId: 815901905
---
 .../services/migration_service/client.py      | 18 ++---
 .../aiplatform_v1/types/accelerator_type.py   |  3 +
 .../aiplatform_v1/types/index_endpoint.py     | 23 +++++++
 google/cloud/aiplatform_v1/types/tool.py      | 67 +++++++++++++++++++
 .../services/migration_service/client.py      | 18 ++---
 .../types/accelerator_type.py                 |  3 +
 .../types/prediction_service.py               |  9 +++
 google/cloud/aiplatform_v1beta1/types/tool.py | 67 +++++++++++++++++++
 ...t_metadata_google.cloud.aiplatform.v1.json |  2 +-
 ...adata_google.cloud.aiplatform.v1beta1.json |  2 +-
 .../test_gen_ai_cache_service.py              | 24 ++++---
 .../test_index_endpoint_service.py            |  6 ++
 .../aiplatform_v1/test_migration_service.py   | 26 +++----
 .../test_gen_ai_cache_service.py              | 24 ++++---
 .../test_migration_service.py                 | 26 +++----
 15 files changed, 256 insertions(+), 62 deletions(-)

diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py
index 4ec75fa0e3..586f742c72 100644
--- a/google/cloud/aiplatform_v1/services/migration_service/client.py
+++ b/google/cloud/aiplatform_v1/services/migration_service/client.py
@@ -265,40 +265,40 @@ def parse_dataset_path(path: str) -> Dict[str, str]:
     @staticmethod
     def dataset_path(
         project: str,
+        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/datasets/{dataset}".format(
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
             project=project,
+            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+            path,
+        )
         return m.groupdict() if m else {}

     @staticmethod
     def dataset_path(
         project: str,
-        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+        return "projects/{project}/datasets/{dataset}".format(
             project=project,
-            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(
-            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
-            path,
-        )
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
         return m.groupdict() if m else {}

     @staticmethod
diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py
index 65e17a1e37..44c8fb5cb4 100644
--- a/google/cloud/aiplatform_v1/types/accelerator_type.py
+++ b/google/cloud/aiplatform_v1/types/accelerator_type.py
@@ -63,6 +63,8 @@ class AcceleratorType(proto.Enum):
             Nvidia B200
GPU. NVIDIA_GB200 (17): Nvidia GB200 GPU. + NVIDIA_RTX_PRO_6000 (18): + Nvidia RTX Pro 6000 GPU. TPU_V2 (6): TPU v2. TPU_V3 (7): @@ -87,6 +89,7 @@ class AcceleratorType(proto.Enum): NVIDIA_H200_141GB = 15 NVIDIA_B200 = 16 NVIDIA_GB200 = 17 + NVIDIA_RTX_PRO_6000 = 18 TPU_V2 = 6 TPU_V3 = 7 TPU_V4_POD = 10 diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index f791cb5484..bfbea76141 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -342,6 +342,10 @@ class DeployedIndex(proto.Message): Note: we only support up to 5 deployment groups(not including 'default'). + deployment_tier (google.cloud.aiplatform_v1.types.DeployedIndex.DeploymentTier): + Optional. The deployment tier that the index is deployed to. + DEPLOYMENT_TIER_UNSPECIFIED will use a system-chosen default + tier. psc_automation_configs (MutableSequence[google.cloud.aiplatform_v1.types.PSCAutomationConfig]): Optional. If set for PSC deployed index, PSC connection will be automatically created after deployment is done and the @@ -349,6 +353,20 @@ class DeployedIndex(proto.Message): private_endpoints.psc_automated_endpoints. """ + class DeploymentTier(proto.Enum): + r"""Tiers encapsulate serving time attributes like latency and + throughput. + + Values: + DEPLOYMENT_TIER_UNSPECIFIED (0): + Default deployment tier. + STORAGE (2): + Optimized for costs. + """ + + DEPLOYMENT_TIER_UNSPECIFIED = 0 + STORAGE = 2 + id: str = proto.Field( proto.STRING, number=1, @@ -407,6 +425,11 @@ class DeployedIndex(proto.Message): proto.STRING, number=11, ) + deployment_tier: DeploymentTier = proto.Field( + proto.ENUM, + number=18, + enum=DeploymentTier, + ) psc_automation_configs: MutableSequence[service_networking.PSCAutomationConfig] = ( proto.RepeatedField( proto.MESSAGE, diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py index 35497bcddd..a07018da3f 100644 --- a/google/cloud/aiplatform_v1/types/tool.py +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -106,21 +106,73 @@ class Tool(proto.Message): specific Function Declarations. """ + class PhishBlockThreshold(proto.Enum): + r"""These are available confidence level user can set to block + malicious urls with chosen confidence and above. For + understanding different confidence of webrisk, please refer to + https://cloud.google.com/web-risk/docs/reference/rpc/google.cloud.webrisk.v1eap1#confidencelevel + + Values: + PHISH_BLOCK_THRESHOLD_UNSPECIFIED (0): + Defaults to unspecified. + BLOCK_LOW_AND_ABOVE (30): + Blocks Low and above confidence URL that is + risky. + BLOCK_MEDIUM_AND_ABOVE (40): + Blocks Medium and above confidence URL that + is risky. + BLOCK_HIGH_AND_ABOVE (50): + Blocks High and above confidence URL that is + risky. + BLOCK_HIGHER_AND_ABOVE (55): + Blocks Higher and above confidence URL that + is risky. + BLOCK_VERY_HIGH_AND_ABOVE (60): + Blocks Very high and above confidence URL + that is risky. + BLOCK_ONLY_EXTREMELY_HIGH (100): + Blocks Extremely high confidence URL that is + risky. + """ + + PHISH_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 30 + BLOCK_MEDIUM_AND_ABOVE = 40 + BLOCK_HIGH_AND_ABOVE = 50 + BLOCK_HIGHER_AND_ABOVE = 55 + BLOCK_VERY_HIGH_AND_ABOVE = 60 + BLOCK_ONLY_EXTREMELY_HIGH = 100 + class GoogleSearch(proto.Message): r"""GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
         Attributes:
             exclude_domains (MutableSequence[str]):
                 Optional. List of domains to be excluded from the search
                 results. The default limit is 2000 domains. Example:
                 ["amazon.com", "facebook.com"].
+            blocking_confidence (google.cloud.aiplatform_v1.types.Tool.PhishBlockThreshold):
+                Optional. Sites with confidence level chosen
+                & above this value will be blocked from the
+                search results.
+
+                This field is a member of `oneof`_ ``_blocking_confidence``.
         """

         exclude_domains: MutableSequence[str] = proto.RepeatedField(
             proto.STRING,
             number=3,
         )
+        blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field(
+            proto.ENUM,
+            number=4,
+            optional=True,
+            enum="Tool.PhishBlockThreshold",
+        )

     class CodeExecution(proto.Message):
         r"""Tool that executes code generated by the model, and automatically
@@ -692,17 +744,32 @@ class EnterpriseWebSearch(proto.Message):
     r"""Tool to search public web data, powered by Vertex AI Search
     and Sec4 compliance.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
     Attributes:
         exclude_domains (MutableSequence[str]):
             Optional. List of domains to be excluded from the search
            results. The default limit is 2000 domains.
+        blocking_confidence (google.cloud.aiplatform_v1.types.Tool.PhishBlockThreshold):
+            Optional. Sites with confidence level chosen
+            & above this value will be blocked from the
+            search results.
+
+            This field is a member of `oneof`_ ``_blocking_confidence``.
     """

     exclude_domains: MutableSequence[str] = proto.RepeatedField(
         proto.STRING,
         number=1,
     )
+    blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field(
+        proto.ENUM,
+        number=2,
+        optional=True,
+        enum="Tool.PhishBlockThreshold",
+    )

 class DynamicRetrievalConfig(proto.Message):
diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
index 0b00060299..97d74b86b4 100644
--- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
@@ -243,40 +243,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
     @staticmethod
     def dataset_path(
         project: str,
+        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/datasets/{dataset}".format(
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
             project=project,
+            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+            path,
+        )
         return m.groupdict() if m else {}

     @staticmethod
     def dataset_path(
         project: str,
-        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+        return "projects/{project}/datasets/{dataset}".format(
             project=project,
-            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(
-            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
-            path,
-        )
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
         return m.groupdict() if m else {}
@staticmethod diff --git a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py index ef459f6c45..62e6dec05d 100644 --- a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py @@ -63,6 +63,8 @@ class AcceleratorType(proto.Enum): Nvidia B200 GPU. NVIDIA_GB200 (17): Nvidia GB200 GPU. + NVIDIA_RTX_PRO_6000 (18): + Nvidia RTX Pro 6000 GPU. TPU_V2 (6): TPU v2. TPU_V3 (7): @@ -87,6 +89,7 @@ class AcceleratorType(proto.Enum): NVIDIA_H200_141GB = 15 NVIDIA_B200 = 16 NVIDIA_GB200 = 17 + NVIDIA_RTX_PRO_6000 = 18 TPU_V2 = 6 TPU_V3 = 7 TPU_V4_POD = 10 diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 7d9d7a93bf..5bce4ddc9c 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -89,6 +89,10 @@ class PredictRequest(proto.Message): ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + labels (MutableMapping[str, str]): + Optional. The user labels for Imagen billing + usage only. Only Imagen supports labels. For + other use cases, it will be ignored. """ endpoint: str = proto.Field( @@ -105,6 +109,11 @@ class PredictRequest(proto.Message): number=3, message=struct_pb2.Value, ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) class PredictResponse(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index e6d1f8babb..82be2fc989 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -107,21 +107,73 @@ class Tool(proto.Message): specific Function Declarations. """ + class PhishBlockThreshold(proto.Enum): + r"""These are available confidence level user can set to block + malicious urls with chosen confidence and above. For + understanding different confidence of webrisk, please refer to + https://cloud.google.com/web-risk/docs/reference/rpc/google.cloud.webrisk.v1eap1#confidencelevel + + Values: + PHISH_BLOCK_THRESHOLD_UNSPECIFIED (0): + Defaults to unspecified. + BLOCK_LOW_AND_ABOVE (30): + Blocks Low and above confidence URL that is + risky. + BLOCK_MEDIUM_AND_ABOVE (40): + Blocks Medium and above confidence URL that + is risky. + BLOCK_HIGH_AND_ABOVE (50): + Blocks High and above confidence URL that is + risky. + BLOCK_HIGHER_AND_ABOVE (55): + Blocks Higher and above confidence URL that + is risky. + BLOCK_VERY_HIGH_AND_ABOVE (60): + Blocks Very high and above confidence URL + that is risky. + BLOCK_ONLY_EXTREMELY_HIGH (100): + Blocks Extremely high confidence URL that is + risky. + """ + + PHISH_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 30 + BLOCK_MEDIUM_AND_ABOVE = 40 + BLOCK_HIGH_AND_ABOVE = 50 + BLOCK_HIGHER_AND_ABOVE = 55 + BLOCK_VERY_HIGH_AND_ABOVE = 60 + BLOCK_ONLY_EXTREMELY_HIGH = 100 + class GoogleSearch(proto.Message): r"""GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: exclude_domains (MutableSequence[str]): Optional. 
List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]. + blocking_confidence (google.cloud.aiplatform_v1beta1.types.Tool.PhishBlockThreshold): + Optional. Sites with confidence level chosen + & above this value will be blocked from the + search results. + + This field is a member of `oneof`_ ``_blocking_confidence``. """ exclude_domains: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) + blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum="Tool.PhishBlockThreshold", + ) class CodeExecution(proto.Message): r"""Tool that executes code generated by the model, and automatically @@ -816,17 +868,32 @@ class EnterpriseWebSearch(proto.Message): r"""Tool to search public web data, powered by Vertex AI Search and Sec4 compliance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: exclude_domains (MutableSequence[str]): Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. + blocking_confidence (google.cloud.aiplatform_v1beta1.types.Tool.PhishBlockThreshold): + Optional. Sites with confidence level chosen + & above this value will be blocked from the + search results. + + This field is a member of `oneof`_ ``_blocking_confidence``. """ exclude_domains: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) + blocking_confidence: "Tool.PhishBlockThreshold" = proto.Field( + proto.ENUM, + number=4, + optional=True, + enum="Tool.PhishBlockThreshold", + ) class DynamicRetrievalConfig(proto.Message): diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 6e0024dc79..5b83a56497 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.119.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index c49361009f..3903bbef4e 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.119.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py index dd85130663..5a0da4a120 100644 --- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py @@ -4717,7 +4717,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -4727,7 +4728,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, 
"url_context": {}, @@ -5222,7 +5224,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -5232,7 +5235,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -6589,7 +6593,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -6599,7 +6604,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -7130,7 +7136,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -7140,7 +7147,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index 41f0436ffe..79535ab5a3 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -6502,6 +6502,7 @@ def test_create_index_endpoint_rest_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -7087,6 +7088,7 @@ def test_update_index_endpoint_rest_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -7780,6 +7782,7 @@ def test_mutate_deployed_index_rest_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -8889,6 +8892,7 @@ async def test_create_index_endpoint_rest_asyncio_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -9523,6 +9527,7 @@ async def test_update_index_endpoint_rest_asyncio_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": "project_id_value", @@ -10282,6 +10287,7 @@ async def test_mutate_deployed_index_rest_asyncio_call_success(request_type): "reserved_ip_ranges_value2", ], "deployment_group": "deployment_group_value", + "deployment_tier": 2, "psc_automation_configs": [ { "project_id": 
"project_id_value", diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index bbff4dca10..1ba265257f 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -5427,19 +5427,22 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -5449,22 +5452,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py index 262cd43467..784e4d8427 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -4728,7 +4728,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -4738,7 +4739,8 @@ def test_create_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -5244,7 +5246,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -5254,7 +5257,8 @@ def test_update_cached_content_rest_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -6622,7 +6626,8 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -6632,7 +6637,8 @@ async 
def test_create_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, @@ -7174,7 +7180,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} @@ -7184,7 +7191,8 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "exclude_domains": [ "exclude_domains_value1", "exclude_domains_value2", - ] + ], + "blocking_confidence": 30, }, "code_execution": {}, "url_context": {}, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index af4ae4e5a0..d2575b79e9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -5403,19 +5403,22 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -5425,22 +5428,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "squid" + dataset = "clam" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", + "project": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) From b72df1c3e418c4b87ecb92e1506e7f3fe8da3994 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 7 Oct 2025 09:57:53 -0700 Subject: [PATCH 04/11] feat: Migrate vertex ai session service to use agent engine sdk PiperOrigin-RevId: 816259798 --- vertexai/agent_engines/templates/adk.py | 21 +++++++----- .../reasoning_engines/templates/adk.py | 33 +++++++++++-------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/vertexai/agent_engines/templates/adk.py b/vertexai/agent_engines/templates/adk.py index 4f1b0cbfaa..73292e029a 100644 --- a/vertexai/agent_engines/templates/adk.py +++ b/vertexai/agent_engines/templates/adk.py @@ -537,15 +537,20 @@ def set_up(self): if session_service_builder: self._tmpl_attrs["session_service"] = session_service_builder() elif "GOOGLE_CLOUD_AGENT_ENGINE_ID" in os.environ: - from google.adk.sessions.vertex_ai_session_service import ( - VertexAiSessionService, - ) + try: + from 
google.adk.sessions.vertex_ai_session_service import ( + VertexAiSessionService, + ) + + self._tmpl_attrs["session_service"] = VertexAiSessionService( + project=project, + location=location, + agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"), + ) + except ImportError: + # TODO(ysian): Handle this via _g3 import for google3. + pass - self._tmpl_attrs["session_service"] = VertexAiSessionService( - project=project, - location=location, - agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"), - ) else: self._tmpl_attrs["session_service"] = InMemorySessionService() diff --git a/vertexai/preview/reasoning_engines/templates/adk.py b/vertexai/preview/reasoning_engines/templates/adk.py index 451656d334..6502d1c594 100644 --- a/vertexai/preview/reasoning_engines/templates/adk.py +++ b/vertexai/preview/reasoning_engines/templates/adk.py @@ -512,21 +512,26 @@ def set_up(self): if session_service_builder: self._tmpl_attrs["session_service"] = session_service_builder() elif "GOOGLE_CLOUD_AGENT_ENGINE_ID" in os.environ: - from google.adk.sessions.vertex_ai_session_service import ( - VertexAiSessionService, - ) - - if is_version_sufficient("1.5.0"): - self._tmpl_attrs["session_service"] = VertexAiSessionService( - project=project, - location=location, - agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"), - ) - else: - self._tmpl_attrs["session_service"] = VertexAiSessionService( - project=project, - location=location, + try: + from google.adk.sessions.vertex_ai_session_service import ( + VertexAiSessionService, ) + + if is_version_sufficient("1.5.0"): + self._tmpl_attrs["session_service"] = VertexAiSessionService( + project=project, + location=location, + agent_engine_id=os.environ.get("GOOGLE_CLOUD_AGENT_ENGINE_ID"), + ) + else: + self._tmpl_attrs["session_service"] = VertexAiSessionService( + project=project, + location=location, + ) + except ImportError: + # TODO(deanchen): Handle this via _g3 import for google3. 
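+                # Without the google-adk package the import above fails and no
+                # session service is configured in this branch.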
+ pass + else: self._tmpl_attrs["session_service"] = InMemorySessionService() From 0dd3ccd5e85f3c790f8f00154e867ab2a3044aed Mon Sep 17 00:00:00 2001 From: Yeesian Ng Date: Tue, 7 Oct 2025 10:36:18 -0700 Subject: [PATCH 05/11] chore: Add logging when deleting agent engine resource PiperOrigin-RevId: 816278193 --- .../genai/replays/test_delete_agent_engine.py | 11 +++- .../unit/vertexai/genai/test_agent_engines.py | 4 +- vertexai/_genai/agent_engines.py | 60 ++++++++++++++++++- 3 files changed, 69 insertions(+), 6 deletions(-) diff --git a/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py b/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py index d4f59f25ba..ea83bdeb2f 100644 --- a/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py +++ b/tests/unit/vertexai/genai/replays/test_delete_agent_engine.py @@ -14,6 +14,7 @@ # # pylint: disable=protected-access,bad-continuation,missing-function-docstring +import logging import pytest @@ -21,10 +22,13 @@ from vertexai._genai import types -def test_agent_engine_delete(client): +def test_agent_engine_delete(client, caplog): + caplog.set_level(logging.INFO) agent_engine = client.agent_engines.create() operation = client.agent_engines.delete(name=agent_engine.api_resource.name) assert isinstance(operation, types.DeleteAgentEngineOperation) + assert "Deleting AgentEngine resource" in caplog.text + assert f"Started AgentEngine delete operation: {operation.name}" in caplog.text pytestmark = pytest_helper.setup( @@ -38,10 +42,13 @@ def test_agent_engine_delete(client): @pytest.mark.asyncio -async def test_agent_engine_delete_async(client): +async def test_agent_engine_delete_async(client, caplog): + caplog.set_level(logging.INFO) # TODO(b/431785750): use async methods for create() when available agent_engine = client.agent_engines.create() operation = await client.aio.agent_engines.delete( name=agent_engine.api_resource.name ) assert isinstance(operation, types.DeleteAgentEngineOperation) + assert "Deleting AgentEngine resource" in caplog.text + assert f"Started AgentEngine delete operation: {operation.name}" in caplog.text diff --git a/tests/unit/vertexai/genai/test_agent_engines.py b/tests/unit/vertexai/genai/test_agent_engines.py index ea4df4bfdd..b06a09c7ba 100644 --- a/tests/unit/vertexai/genai/test_agent_engines.py +++ b/tests/unit/vertexai/genai/test_agent_engines.py @@ -2271,7 +2271,7 @@ def teardown_method(self): def test_delete_agent_engine(self): with mock.patch.object( - self.client.agent_engines._api_client, "async_request" + self.client.aio.agent_engines._api_client, "async_request" ) as request_mock: request_mock.return_value = genai_types.HttpResponse(body="") asyncio.run( @@ -2288,7 +2288,7 @@ def test_delete_agent_engine(self): def test_delete_agent_engine_force(self): with mock.patch.object( - self.client.agent_engines._api_client, "async_request" + self.client.aio.agent_engines._api_client, "async_request" ) as request_mock: request_mock.return_value = genai_types.HttpResponse(body="") asyncio.run( diff --git a/vertexai/_genai/agent_engines.py b/vertexai/_genai/agent_engines.py index b45edab806..34083d890e 100644 --- a/vertexai/_genai/agent_engines.py +++ b/vertexai/_genai/agent_engines.py @@ -318,7 +318,7 @@ def _create( self._api_client._verify_response(return_value) return return_value - def delete( + def _delete( self, *, name: str, @@ -725,6 +725,34 @@ def get( self._register_api_methods(agent_engine=agent_engine) return agent_engine + def delete( + self, + *, + name: str, + force: Optional[bool] = 
None, + config: Optional[types.DeleteAgentEngineConfigOrDict] = None, + ) -> types.DeleteAgentEngineOperation: + """ + Delete an Agent Engine resource. + + Args: + name (str): + Required. The name of the Agent Engine to be deleted. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}` + or `reasoningEngines/{resource_id}`. + force (bool): + Optional. If set to True, child resources will also be deleted. + Otherwise, the request will fail with FAILED_PRECONDITION error when + the Agent Engine has undeleted child resources. Defaults to False. + config (DeleteAgentEngineConfig): + Optional. Additional configurations for deleting the Agent Engine. + + """ + logger.info(f"Deleting AgentEngine resource: {name}") + operation = self._delete(name=name, force=force, config=config) + logger.info(f"Started AgentEngine delete operation: {operation.name}") + return operation + def create( self, *, @@ -1659,7 +1687,7 @@ async def _create( self._api_client._verify_response(return_value) return return_value - async def delete( + async def _delete( self, *, name: str, @@ -1994,6 +2022,34 @@ async def _update( _memories = None _sessions = None + async def delete( + self, + *, + name: str, + force: Optional[bool] = None, + config: Optional[types.DeleteAgentEngineConfigOrDict] = None, + ) -> types.DeleteAgentEngineOperation: + """ + Delete an Agent Engine resource. + + Args: + name (str): + Required. The name of the Agent Engine to be deleted. Format: + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}` + or `reasoningEngines/{resource_id}`. + force (bool): + Optional. If set to True, child resources will also be deleted. + Otherwise, the request will fail with FAILED_PRECONDITION error when + the Agent Engine has undeleted child resources. Defaults to False. + config (DeleteAgentEngineConfig): + Optional. Additional configurations for deleting the Agent Engine. + + """ + logger.info(f"Deleting AgentEngine resource: {name}") + operation = await self._delete(name=name, force=force, config=config) + logger.info(f"Started AgentEngine delete operation: {operation.name}") + return operation + @property def memories(self): if self._memories is None: From 0077bdebe62d0363c1faacd5f6e5562086dff080 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 7 Oct 2025 12:19:04 -0700 Subject: [PATCH 06/11] docs: Fix Sessions SDK docstrings PiperOrigin-RevId: 816325320 --- vertexai/_genai/session_events.py | 4 ++-- vertexai/_genai/sessions.py | 16 ++++++++-------- vertexai/_genai/types.py | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/vertexai/_genai/session_events.py b/vertexai/_genai/session_events.py index 1ce8284607..10288b028e 100644 --- a/vertexai/_genai/session_events.py +++ b/vertexai/_genai/session_events.py @@ -142,7 +142,7 @@ def append( Appends Agent Engine session event. Args: - name (str): Required. The name of the Agent Engine session to append event for. Format: + name (str): Required. The name of the Agent Engine session to append the event to. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. author (str): Required. The author of the Agent Engine session event. invocation_id (str): Required. The invocation ID of the Agent Engine session event. @@ -313,7 +313,7 @@ async def append( Appends Agent Engine session event. Args: - name (str): Required. The name of the Agent Engine session to append event for. Format: + name (str): Required. 
The name of the Agent Engine session to append the event to. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. author (str): Required. The author of the Agent Engine session event. invocation_id (str): Required. The invocation ID of the Agent Engine session event. diff --git a/vertexai/_genai/sessions.py b/vertexai/_genai/sessions.py index 2588996022..b7cc4eb286 100644 --- a/vertexai/_genai/sessions.py +++ b/vertexai/_genai/sessions.py @@ -225,7 +225,7 @@ def _create( Creates a new session in the Agent Engine. Args: - name (str): Required. The name of the Agent Engine session to be created. Format: + name (str): Required. The name of the Agent Engine to create the session under. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. user_id (str): Required. The user ID of the session. config (CreateAgentEngineSessionConfig): @@ -293,7 +293,7 @@ def delete( Args: name (str): Required. The name of the Agent Engine session to be deleted. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (DeleteAgentEngineSessionConfig): Optional. Additional configurations for deleting the Agent Engine session. @@ -358,7 +358,7 @@ def get( Args: name (str): Required. The name of the Agent Engine session to get. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (GetAgentEngineSessionConfig): Optional. Additional configurations for getting the Agent Engine session. @@ -539,7 +539,7 @@ def _update( Args: name (str): Required. The name of the Agent Engine session to be updated. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (UpdateAgentEngineSessionConfig): Optional. Additional configurations for updating the Agent Engine session. @@ -699,7 +699,7 @@ async def _create( Creates a new session in the Agent Engine. Args: - name (str): Required. The name of the Agent Engine session to be created. Format: + name (str): Required. The name of the Agent Engine to create the session under. Format: `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. user_id (str): Required. The user ID of the session. config (CreateAgentEngineSessionConfig): @@ -769,7 +769,7 @@ async def delete( Args: name (str): Required. The name of the Agent Engine session to be deleted. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (DeleteAgentEngineSessionConfig): Optional. Additional configurations for deleting the Agent Engine session. @@ -836,7 +836,7 @@ async def get( Args: name (str): Required. The name of the Agent Engine session to get. Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (GetAgentEngineSessionConfig): Optional. Additional configurations for getting the Agent Engine session. @@ -1023,7 +1023,7 @@ async def _update( Args: name (str): Required. The name of the Agent Engine session to be updated. 
Format: - `projects/{project}/locations/{location}/reasoningEngines/{resource_id}`. + `projects/{project}/locations/{location}/reasoningEngines/{resource_id}/sessions/{session_id}`. config (UpdateAgentEngineSessionConfig): Optional. Additional configurations for updating the Agent Engine session. diff --git a/vertexai/_genai/types.py b/vertexai/_genai/types.py index fda95b4213..3935c670d7 100644 --- a/vertexai/_genai/types.py +++ b/vertexai/_genai/types.py @@ -7467,7 +7467,7 @@ class _GetAgentEngineSessionRequestParameters(_common.BaseModel): """Parameters for getting an agent engine session.""" name: Optional[str] = Field( - default=None, description="""Name of the agent engine.""" + default=None, description="""Name of the agent engine session.""" ) config: Optional[GetAgentEngineSessionConfig] = Field( default=None, description="""""" @@ -7478,7 +7478,7 @@ class _GetAgentEngineSessionRequestParametersDict(TypedDict, total=False): """Parameters for getting an agent engine session.""" name: Optional[str] - """Name of the agent engine.""" + """Name of the agent engine session.""" config: Optional[GetAgentEngineSessionConfigDict] """""" @@ -7982,7 +7982,7 @@ class ListAgentEngineSessionEventsConfigDict(TypedDict, total=False): class _ListAgentEngineSessionEventsRequestParameters(_common.BaseModel): - """Parameters for listing agent engines.""" + """Parameters for listing agent engine session events.""" name: Optional[str] = Field( default=None, description="""Name of the agent engine session.""" @@ -7993,7 +7993,7 @@ class _ListAgentEngineSessionEventsRequestParameters(_common.BaseModel): class _ListAgentEngineSessionEventsRequestParametersDict(TypedDict, total=False): - """Parameters for listing agent engines.""" + """Parameters for listing agent engine session events.""" name: Optional[str] """Name of the agent engine session.""" From 64b0665169b045ccfd16e537af2e45a4dd7977e0 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 7 Oct 2025 14:53:20 -0700 Subject: [PATCH 07/11] fix: GenAI Client(evals) - Parse user defined rubrics correctly to LLM metric PiperOrigin-RevId: 816389819 --- vertexai/_genai/_evals_metric_handlers.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/vertexai/_genai/_evals_metric_handlers.py b/vertexai/_genai/_evals_metric_handlers.py index 8f97f37f92..d431e9b742 100644 --- a/vertexai/_genai/_evals_metric_handlers.py +++ b/vertexai/_genai/_evals_metric_handlers.py @@ -462,15 +462,25 @@ def _build_rubric_based_input( "must be a dictionary." ) - rubrics_list = rubric_groups_data.get(self.metric.rubric_group_name, []) + rubric_group_from_data = rubric_groups_data.get( + self.metric.rubric_group_name, {} + ) + if isinstance(rubric_group_from_data, dict): + rubrics_list = rubric_group_from_data.get("rubrics", []) + else: + rubrics_list = [] + if not isinstance(rubrics_list, list): logger.warning( - "Rubric group '%s' in 'rubric_groups' is not a list for case %s.", + "Rubrics for group '%s' in case %s is not a list: %s. 
" + "Skipping rubrics for this case.", self.metric.rubric_group_name, eval_case.eval_case_id, + rubrics_list, ) rubrics_list = [] + parsed_rubrics = [types.Rubric(**r) for r in rubrics_list] rubric_enhanced_contents = { "prompt": ( [eval_case.prompt.model_dump(mode="json", exclude_none=True)] @@ -481,8 +491,8 @@ def _build_rubric_based_input( "rubric_groups": { self.metric.rubric_group_name: { "rubrics": [ - r.model_dump(mode="json") if isinstance(r, types.Rubric) else r - for r in rubrics_list + r.model_dump(mode="json", exclude_none=True) + for r in parsed_rubrics ] } }, From 0a369eacaa4e21e8055ef7c2e0f4c6da3a9298f8 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 7 Oct 2025 18:40:11 -0700 Subject: [PATCH 08/11] feat: Add ability to use existing sessions for streaming_agent_run_with_events calls. PiperOrigin-RevId: 816470728 --- vertexai/agent_engines/templates/adk.py | 58 +++++++++++++----- .../reasoning_engines/templates/adk.py | 59 +++++++++++++------ 2 files changed, 83 insertions(+), 34 deletions(-) diff --git a/vertexai/agent_engines/templates/adk.py b/vertexai/agent_engines/templates/adk.py index 73292e029a..4f7fef509f 100644 --- a/vertexai/agent_engines/templates/adk.py +++ b/vertexai/agent_engines/templates/adk.py @@ -169,6 +169,9 @@ def __init__(self, **kwargs): self.user_id: Optional[str] = kwargs.get("user_id", _DEFAULT_USER_ID) # The user ID. + self.session_id: Optional[str] = kwargs.get("session_id") + # The session ID. + class _StreamingRunResponse: """Response object for `streaming_agent_run_with_events` method. @@ -181,6 +184,8 @@ def __init__(self, **kwargs): # List of generated events. self.artifacts: Optional[List[_Artifact]] = kwargs.get("artifacts") # List of artifacts belonging to the session. + self.session_id: Optional[str] = kwargs.get("session_id") + # The session ID. def dump(self) -> Dict[str, Any]: from vertexai.agent_engines import _utils @@ -194,6 +199,8 @@ def dump(self) -> Dict[str, Any]: result["events"].append(event_dict) if self.artifacts: result["artifacts"] = [artifact.dump() for artifact in self.artifacts] + if self.session_id: + result["session_id"] = self.session_id return result @@ -402,7 +409,10 @@ async def _init_session( auth = _Authorization(**auth) session_state[f"temp:{auth_id}"] = auth.access_token - session_id = f"temp_session_{random.randbytes(8).hex()}" + if request.session_id: + session_id = request.session_id + else: + session_id = f"temp_session_{random.randbytes(8).hex()}" session = await session_service.create_session( app_name=self._tmpl_attrs.get("app_name"), user_id=request.user_id, @@ -450,7 +460,9 @@ async def _convert_response_events( """Converts the events to the streaming run response object.""" import collections - result = _StreamingRunResponse(events=events, artifacts=[]) + result = _StreamingRunResponse( + events=events, artifacts=[], session_id=session_id + ) # Save the generated artifacts into the result object. artifact_versions = collections.defaultdict(list) @@ -685,22 +697,35 @@ async def streaming_agent_run_with_events(self, request_json: str): request = _StreamRunRequest(**json.loads(request_json)) if not self._tmpl_attrs.get("in_memory_runner"): self.set_up() - if not self._tmpl_attrs.get("artifact_service"): - self.set_up() # Prepare the in-memory session. 
if not self._tmpl_attrs.get("in_memory_artifact_service"): self.set_up() if not self._tmpl_attrs.get("in_memory_session_service"): self.set_up() - session = await self._init_session( - session_service=self._tmpl_attrs.get("in_memory_session_service"), - artifact_service=self._tmpl_attrs.get("in_memory_artifact_service"), - request=request, - ) + session_service = self._tmpl_attrs.get("in_memory_session_service") + artifact_service = self._tmpl_attrs.get("in_memory_artifact_service") + # Try to get the session, if it doesn't exist, create a new one. + session = None + if request.session_id: + try: + session = await session_service.get_session( + app_name=self._tmpl_attrs.get("app_name"), + user_id=request.user_id, + session_id=request.session_id, + ) + except RuntimeError: + pass + if not session: + # Fall back to create session if the session is not found. + session = await self._init_session( + session_service=session_service, + artifact_service=artifact_service, + request=request, + ) if not session: raise RuntimeError("Session initialization failed.") - # Run the agent. + # Run the agent message_for_agent = types.Content(**request.message) try: async for event in self._tmpl_attrs.get("in_memory_runner").run_async( @@ -712,15 +737,16 @@ async def streaming_agent_run_with_events(self, request_json: str): user_id=request.user_id, session_id=session.id, events=[event], - artifact_service=self._tmpl_attrs.get("in_memory_artifact_service"), + artifact_service=artifact_service, ) yield converted_event finally: - await self._tmpl_attrs.get("in_memory_session_service").delete_session( - app_name=self._tmpl_attrs.get("app_name"), - user_id=request.user_id, - session_id=session.id, - ) + if session and not request.session_id: + await session_service.delete_session( + app_name=self._tmpl_attrs.get("app_name"), + user_id=request.user_id, + session_id=session.id, + ) async def async_get_session( self, diff --git a/vertexai/preview/reasoning_engines/templates/adk.py b/vertexai/preview/reasoning_engines/templates/adk.py index 6502d1c594..d6336e13d4 100644 --- a/vertexai/preview/reasoning_engines/templates/adk.py +++ b/vertexai/preview/reasoning_engines/templates/adk.py @@ -183,6 +183,9 @@ def __init__(self, **kwargs): self.user_id: Optional[str] = kwargs.get("user_id", _DEFAULT_USER_ID) # The user ID. + self.session_id: Optional[str] = kwargs.get("session_id") + # The session ID. + class _StreamingRunResponse: """Response object for `streaming_agent_run_with_events` method. @@ -195,6 +198,8 @@ def __init__(self, **kwargs): # List of generated events. self.artifacts: Optional[List[_Artifact]] = kwargs.get("artifacts") # List of artifacts belonging to the session. + self.session_id: Optional[str] = kwargs.get("session_id") + # The session ID. 
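+        # Echoed back in dump() so callers can reuse the same session on later requests.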
def dump(self) -> Dict[str, Any]: from vertexai.agent_engines import _utils @@ -208,6 +213,8 @@ def dump(self) -> Dict[str, Any]: result["events"].append(event_dict) if self.artifacts: result["artifacts"] = [artifact.dump() for artifact in self.artifacts] + if self.session_id: + result["session_id"] = self.session_id return result @@ -383,7 +390,10 @@ async def _init_session( auth = _Authorization(**auth) session_state[f"temp:{auth_id}"] = auth.access_token - session_id = f"temp_session_{random.randbytes(8).hex()}" + if request.session_id: + session_id = request.session_id + else: + session_id = f"temp_session_{random.randbytes(8).hex()}" session = await session_service.create_session( app_name=self._tmpl_attrs.get("app_name"), user_id=request.user_id, @@ -431,7 +441,9 @@ async def _convert_response_events( """Converts the events to the streaming run response object.""" import collections - result = _StreamingRunResponse(events=events, artifacts=[]) + result = _StreamingRunResponse( + events=events, artifacts=[], session_id=session_id + ) # Save the generated artifacts into the result object. artifact_versions = collections.defaultdict(list) @@ -735,21 +747,33 @@ async def _invoke_agent_async(): request = _StreamRunRequest(**json.loads(request_json)) if not self._tmpl_attrs.get("in_memory_runner"): self.set_up() - if not self._tmpl_attrs.get("artifact_service"): - self.set_up() # Prepare the in-memory session. if not self._tmpl_attrs.get("in_memory_artifact_service"): self.set_up() if not self._tmpl_attrs.get("in_memory_session_service"): self.set_up() - session = await self._init_session( - session_service=self._tmpl_attrs.get("in_memory_session_service"), - artifact_service=self._tmpl_attrs.get("in_memory_artifact_service"), - request=request, - ) + session_service = self._tmpl_attrs.get("in_memory_session_service") + artifact_service = self._tmpl_attrs.get("in_memory_artifact_service") + # Try to get the session, if it doesn't exist, create a new one. + session = None + if request.session_id: + try: + session = await session_service.get_session( + app_name=self._tmpl_attrs.get("app_name"), + user_id=request.user_id, + session_id=request.session_id, + ) + except RuntimeError: + pass + if not session: + # Fall back to create session if the session is not found. + session = await self._init_session( + session_service=session_service, + artifact_service=artifact_service, + request=request, + ) if not session: raise RuntimeError("Session initialization failed.") - # Run the agent. 
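+            # Run the agent on the resolved session.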
message_for_agent = types.Content(**request.message) try: @@ -762,17 +786,16 @@ async def _invoke_agent_async(): user_id=request.user_id, session_id=session.id, events=[event], - artifact_service=self._tmpl_attrs.get( - "in_memory_artifact_service" - ), + artifact_service=artifact_service, ) event_queue.put(converted_event) finally: - await self._tmpl_attrs.get("in_memory_session_service").delete_session( - app_name=self._tmpl_attrs.get("app_name"), - user_id=request.user_id, - session_id=session.id, - ) + if session and not request.session_id: + await session_service.delete_session( + app_name=self._tmpl_attrs.get("app_name"), + user_id=request.user_id, + session_id=session.id, + ) def _asyncio_thread_main(): try: From 21dac70f8c3962230bcd99f5ff434c8a9031c58f Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 09:51:07 -0700 Subject: [PATCH 09/11] chore(main): release 1.120.0 (#5883) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 26 +++++++++++++++++++ google/cloud/aiplatform/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1/gapic_version.py | 2 +- .../v1/schema/predict/params/gapic_version.py | 2 +- .../schema/predict/params_v1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../predict/prediction_v1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1beta1/gapic_version.py | 2 +- .../schema/predict/params/gapic_version.py | 2 +- .../predict/params_v1beta1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../prediction_v1beta1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1beta1/gapic_version.py | 2 +- google/cloud/aiplatform/version.py | 2 +- google/cloud/aiplatform_v1/gapic_version.py | 2 +- .../cloud/aiplatform_v1beta1/gapic_version.py | 2 +- pypi/_vertex_ai_placeholder/version.py | 2 +- ...t_metadata_google.cloud.aiplatform.v1.json | 2 +- ...adata_google.cloud.aiplatform.v1beta1.json | 2 +- 25 files changed, 50 insertions(+), 24 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d60fae21f4..506bfad55f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.119.0" + ".": "1.120.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6413491acf..6d4a43adfc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## [1.120.0](https://github.com/googleapis/python-aiplatform/compare/v1.119.0...v1.120.0) (2025-10-08) + + +### Features + +* Add ability to use existing sessions for streaming_agent_run_with_events calls. 
([0a369ea](https://github.com/googleapis/python-aiplatform/commit/0a369eacaa4e21e8055ef7c2e0f4c6da3a9298f8)) +* Add DeploymentTier enum to DeployedIndex ([89a26c1](https://github.com/googleapis/python-aiplatform/commit/89a26c15b8a15c8698192dc283e5839729ad3e66)) +* Add labels field for Predict API for Imagen use case (v1beta) ([89a26c1](https://github.com/googleapis/python-aiplatform/commit/89a26c15b8a15c8698192dc283e5839729ad3e66)) +* Auto-generated CL for //google/cloud/aiplatform:aiplatform_v1_public_proto_gen ([89a26c1](https://github.com/googleapis/python-aiplatform/commit/89a26c15b8a15c8698192dc283e5839729ad3e66)) +* Expose PSC for CustomModel ([d02099c](https://github.com/googleapis/python-aiplatform/commit/d02099cf200f7e372ad0c38f98a5b8c92bcc581c)) +* GenAI Client(evals) - Add `get_evaluation_set` and `get_evaluation_item` methods to Vertex AI GenAI SDK evals ([7757886](https://github.com/googleapis/python-aiplatform/commit/77578867ff7c4e8a9c4618481821cded32b4b135)) +* GenAI Client(evals) - Add `show` method for EvaluationRun class in Vertex AI GenAI SDK evals ([0c932b9](https://github.com/googleapis/python-aiplatform/commit/0c932b99bafde099734cf136828583de23fbaeb6)) +* Migrate vertex ai session service to use agent engine sdk ([b72df1c](https://github.com/googleapis/python-aiplatform/commit/b72df1c3e418c4b87ecb92e1506e7f3fe8da3994)) +* Migrate vertex_ai_session_service to use Agent Engine SDK ([55b54a2](https://github.com/googleapis/python-aiplatform/commit/55b54a21005587338abdd66c7605f534294b0c94)) +* Migrate vertex_ai_session_service to use Agent Engine SDK ([03772e2](https://github.com/googleapis/python-aiplatform/commit/03772e2a87f2fc6c66a69220e004522cc6ba1f94)) + + +### Bug Fixes + +* GenAI Client(evals) - Parse user defined rubrics correctly to LLM metric ([64b0665](https://github.com/googleapis/python-aiplatform/commit/64b0665169b045ccfd16e537af2e45a4dd7977e0)) + + +### Documentation + +* Fix Sessions SDK docstrings ([0077bde](https://github.com/googleapis/python-aiplatform/commit/0077bdebe62d0363c1faacd5f6e5562086dff080)) + ## [1.119.0](https://github.com/googleapis/python-aiplatform/compare/v1.118.0...v1.119.0) (2025-10-03) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 20b3ea909a..6fe505dcba 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.119.0" +__version__ = "1.120.0" diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index e79200d4f2..85adbe7e01 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.119.0" # {x-release-please-version} +__version__ = "1.120.0" # {x-release-please-version} diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index 442134d161..40bea58715 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.119.0" +__version__ = "1.120.0" diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 5b83a56497..0519cfa693 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "0.1.0" + "version": "1.120.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 3903bbef4e..a8c22e4822 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "0.1.0" + "version": "1.120.0" }, "snippets": [ { From fbe6778f055f4c142591ebf2f794809dc964049c Mon Sep 17 00:00:00 2001 From: "codeflash-ai[bot]" <148906541+codeflash-ai[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 22:22:22 +0000 Subject: [PATCH 10/11] Optimize PipelineRuntimeConfigBuilder.build The optimized code achieves a 2939% speedup by **pre-computing version comparisons in the constructor** instead of repeating expensive `packaging.version.parse()` calls during execution. **Key optimizations:** 1. **Version parsing moved to `__init__`**: The original code called `packaging.version.parse(self._schema_version)` and `packaging.version.parse("2.0.0")` on every `build()` and `_get_vertex_value()` call. The optimized version parses these once during initialization and stores the results as `self._parsed_schema_version` and `self._is_version_gt_2`. 2. **Boolean flag for version comparison**: Instead of repeating the expensive version comparison `> packaging.version.parse("2.0.0")`, the code uses the pre-computed boolean `self._is_version_gt_2`. 
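For illustration, here is a minimal, self-contained sketch of this parse-once pattern. The class and field names below are hypothetical stand-ins, not the SDK's `PipelineRuntimeConfigBuilder` itself:

```python
from typing import Any, Dict

import packaging.version


class _VersionedBuilderSketch:
    """Hypothetical builder showing parse-once, compare-many."""

    def __init__(self, schema_version: str) -> None:
        self._schema_version = schema_version
        # Parse once at construction time instead of on every build() call.
        self._parsed_schema_version = packaging.version.parse(schema_version)
        self._is_version_gt_2 = self._parsed_schema_version > packaging.version.parse(
            "2.0.0"
        )

    def build(self) -> Dict[str, Any]:
        # A cheap boolean check replaces two packaging.version.parse() calls
        # per invocation; the hot path no longer re-parses version strings.
        key = "parameterValues" if self._is_version_gt_2 else "parameters"
        return {key: {}}


# Usage sketch: the parsing cost is paid once, no matter how many times
# build() runs afterwards.
builder = _VersionedBuilderSketch("2.1.0")
for _ in range(1000):
    builder.build()
```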
**Why this is faster:** - `packaging.version.parse()` involves string parsing and object creation, which is computationally expensive - In the original code, these parsing operations dominated execution time (95.4% in `_get_vertex_value` and significant time in `build()`) - The optimization eliminates redundant parsing - especially impactful when `_get_vertex_value()` is called multiple times per `build()` invocation **Test case performance:** - **Large-scale scenarios benefit most**: Tests with 500+ parameters show 2700-5500% speedups because `_get_vertex_value()` is called repeatedly - **Basic scenarios**: Even simple cases show 700-1200% improvements due to eliminating version parsing overhead - **Edge cases**: Error scenarios still improve 400-600% as version parsing happens before validation logic The optimization is particularly effective for pipelines with many parameters, where the version comparison cost scales linearly with parameter count. --- google/cloud/aiplatform/utils/pipeline_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/google/cloud/aiplatform/utils/pipeline_utils.py b/google/cloud/aiplatform/utils/pipeline_utils.py index 88323efe76..5791a54338 100644 --- a/google/cloud/aiplatform/utils/pipeline_utils.py +++ b/google/cloud/aiplatform/utils/pipeline_utils.py @@ -68,6 +68,10 @@ def __init__( self._input_artifacts = copy.deepcopy(input_artifacts or {}) self._failure_policy = failure_policy self._default_runtime = default_runtime + self._parsed_schema_version = packaging.version.parse(schema_version) + self._is_version_gt_2 = self._parsed_schema_version > packaging.version.parse( + "2.0.0" + ) @classmethod def from_job_spec_json( @@ -188,9 +192,7 @@ def build(self) -> Dict[str, Any]: "Pipeline root must be specified, either during " "compile time, or when calling the service." 
) - if packaging.version.parse(self._schema_version) > packaging.version.parse( - "2.0.0" - ): + if self._is_version_gt_2: parameter_values_key = "parameterValues" else: parameter_values_key = "parameters" @@ -243,9 +245,7 @@ def _get_vertex_value( "pipeline job input definitions.".format(name) ) - if packaging.version.parse(self._schema_version) <= packaging.version.parse( - "2.0.0" - ): + if not self._is_version_gt_2: result = {} if self._parameter_types[name] == "INT": result["intValue"] = value From 4ca9fccfed52cda4661e308aa9915f1bd0ea8492 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Wed, 8 Oct 2025 17:32:42 -0700 Subject: [PATCH 11/11] feat: GenAI SDK client(evals) Update data model for agent evaluation PiperOrigin-RevId: 816927504 --- tests/unit/vertexai/genai/test_evals.py | 46 +++++++++++--------- vertexai/_genai/types.py | 56 +++++++++++++++++-------- 2 files changed, 66 insertions(+), 36 deletions(-) diff --git a/tests/unit/vertexai/genai/test_evals.py b/tests/unit/vertexai/genai/test_evals.py index 475f60913d..d21d536fdb 100644 --- a/tests/unit/vertexai/genai/test_evals.py +++ b/tests/unit/vertexai/genai/test_evals.py @@ -2297,10 +2297,10 @@ def test_convert_tool_call_parts(self): ) -class TestAgentMetadata: - """Unit tests for the AgentMetadata class.""" +class TestAgentInfo: + """Unit tests for the AgentInfo class.""" - def test_agent_metadata_creation(self): + def test_agent_info_creation(self): tool = genai_types.Tool( function_declarations=[ genai_types.FunctionDeclaration( @@ -2313,18 +2313,16 @@ def test_agent_metadata_creation(self): ) ] ) - agent_metadata = vertexai_genai_types.AgentMetadata( + agent_info = vertexai_genai_types.AgentInfo( name="agent1", instruction="instruction1", description="description1", tool_declarations=[tool], - sub_agent_names=["sub_agent1"], ) - assert agent_metadata.name == "agent1" - assert agent_metadata.instruction == "instruction1" - assert agent_metadata.description == "description1" - assert agent_metadata.tool_declarations == [tool] - assert agent_metadata.sub_agent_names == ["sub_agent1"] + assert agent_info.name == "agent1" + assert agent_info.instruction == "instruction1" + assert agent_info.description == "description1" + assert agent_info.tool_declarations == [tool] class TestEvent: @@ -2359,13 +2357,11 @@ def test_eval_case_with_agent_eval_fields(self): ) ] ) - agent_metadata = { - "agent1": vertexai_genai_types.AgentMetadata( - name="agent1", - instruction="instruction1", - tool_declarations=[tool], - ) - } + agent_info = vertexai_genai_types.AgentInfo( + name="agent1", + instruction="instruction1", + tool_declarations=[tool], + ) intermediate_events = [ vertexai_genai_types.Event( event_id="event1", @@ -2381,14 +2377,26 @@ def test_eval_case_with_agent_eval_fields(self): response=genai_types.Content(parts=[genai_types.Part(text="Hi")]) ) ], - agent_metadata=agent_metadata, + agent_info=agent_info, intermediate_events=intermediate_events, ) - assert eval_case.agent_metadata == agent_metadata + assert eval_case.agent_info == agent_info assert eval_case.intermediate_events == intermediate_events +class TestSessionInput: + """Unit tests for the SessionInput class.""" + + def test_session_input_creation(self): + session_input = vertexai_genai_types.SessionInput( + user_id="user1", + state={"key": "value"}, + ) + assert session_input.user_id == "user1" + assert session_input.state == {"key": "value"} + + class TestMetric: """Unit tests for the Metric class.""" diff --git a/vertexai/_genai/types.py 
b/vertexai/_genai/types.py index 3935c670d7..f3409c8b95 100644 --- a/vertexai/_genai/types.py +++ b/vertexai/_genai/types.py @@ -10398,8 +10398,8 @@ class EvalRunInferenceConfigDict(TypedDict, total=False): EvalRunInferenceConfigOrDict = Union[EvalRunInferenceConfig, EvalRunInferenceConfigDict] -class AgentMetadata(_common.BaseModel): - """AgentMetadata for agent eval.""" +class AgentInfo(_common.BaseModel): + """The agent info of an agent, used for agent eval.""" name: Optional[str] = Field( default=None, description="""Agent name, used as an identifier.""" @@ -10413,13 +10413,10 @@ class AgentMetadata(_common.BaseModel): tool_declarations: Optional[genai_types.ToolListUnion] = Field( default=None, description="""List of tools used by the Agent.""" ) - sub_agent_names: Optional[list[str]] = Field( - default=None, description="""List of sub-agent names.""" - ) -class AgentMetadataDict(TypedDict, total=False): - """AgentMetadata for agent eval.""" +class AgentInfoDict(TypedDict, total=False): + """The agent info of an agent, used for agent eval.""" name: Optional[str] """Agent name, used as an identifier.""" @@ -10433,11 +10430,8 @@ class AgentMetadataDict(TypedDict, total=False): tool_declarations: Optional[genai_types.ToolListUnionDict] """List of tools used by the Agent.""" - sub_agent_names: Optional[list[str]] - """List of sub-agent names.""" - -AgentMetadataOrDict = Union[AgentMetadata, AgentMetadataDict] +AgentInfoOrDict = Union[AgentInfo, AgentInfoDict] class ContentMapContents(_common.BaseModel): @@ -10669,11 +10663,11 @@ class EvalCase(_common.BaseModel): ) intermediate_events: Optional[list[Event]] = Field( default=None, - description="""Intermediate events of a single turn in agent eval or intermediate events of the last turn for multi-turn agent eval.""", + description="""This field is experimental and may change in future versions. Intermediate events of a single turn in an agent run or intermediate events of the last turn of a multi-turn agent run.""", ) - agent_metadata: Optional[dict[str, AgentMetadata]] = Field( + agent_info: Optional[AgentInfo] = Field( default=None, - description="""Agent metadata for agent eval, keyed by agent name. This can be extended for multi-agent evaluation.""", + description="""This field is experimental and may change in future versions. The agent info of the agent under evaluation. This can be extended for multi-agent evaluation.""", ) # Allow extra fields to support custom metric prompts and stay backward compatible. model_config = ConfigDict(frozen=True, extra="allow") @@ -10704,10 +10698,10 @@ class EvalCaseDict(TypedDict, total=False): """Unique identifier for the evaluation case.""" intermediate_events: Optional[list[EventDict]] - """Intermediate events of a single turn in agent eval or intermediate events of the last turn for multi-turn agent eval.""" + """This field is experimental and may change in future versions. Intermediate events of a single turn in an agent run or intermediate events of the last turn of a multi-turn agent run.""" - agent_metadata: Optional[dict[str, AgentMetadataDict]] - """Agent metadata for agent eval, keyed by agent name. This can be extended for multi-agent evaluation.""" + agent_info: Optional[AgentInfoDict] + """This field is experimental and may change in future versions. The agent info of the agent under evaluation.
This can be extended for multi-agent evaluation.""" EvalCaseOrDict = Union[EvalCase, EvalCaseDict] @@ -11076,6 +11070,34 @@ class EvaluationResultDict(TypedDict, total=False): EvaluationResultOrDict = Union[EvaluationResult, EvaluationResultDict] +class SessionInput(_common.BaseModel): + """This field is experimental and may change in future versions. + + Input to initialize a session and run an agent, used for agent evaluation. + """ + + user_id: Optional[str] = Field(default=None, description="""The user id.""") + state: Optional[dict[str, str]] = Field( + default=None, description="""The state of the session.""" + ) + + +class SessionInputDict(TypedDict, total=False): + """This field is experimental and may change in future versions. + + Input to initialize a session and run an agent, used for agent evaluation. + """ + + user_id: Optional[str] + """The user id.""" + + state: Optional[dict[str, str]] + """The state of the session.""" + + +SessionInputOrDict = Union[SessionInput, SessionInputDict] + + class WinRateStats(_common.BaseModel): """Statistics for win rates for a single metric."""
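To make the updated data model concrete, here is a minimal usage sketch mirroring the revised unit tests above. The import aliases and field values are illustrative; `ResponseCandidate` is assumed to be the response wrapper used elsewhere in the evals test suite, and the `FunctionDeclaration` arguments are example values only:

```python
from google.genai import types as genai_types
from vertexai._genai import types as vertexai_genai_types

# Describe the agent under evaluation with the new AgentInfo model
# (which replaces AgentMetadata and drops sub_agent_names).
agent_info = vertexai_genai_types.AgentInfo(
    name="agent1",
    instruction="instruction1",
    description="description1",
    tool_declarations=[
        genai_types.Tool(
            function_declarations=[
                # Illustrative declaration; real tools would define parameters.
                genai_types.FunctionDeclaration(
                    name="tool1", description="An example tool."
                )
            ]
        )
    ],
)

# EvalCase now takes a single agent_info object instead of a dict of
# AgentMetadata keyed by agent name; both agent fields are experimental.
eval_case = vertexai_genai_types.EvalCase(
    prompt=genai_types.Content(parts=[genai_types.Part(text="Hello")]),
    responses=[
        vertexai_genai_types.ResponseCandidate(
            response=genai_types.Content(parts=[genai_types.Part(text="Hi")])
        )
    ],
    agent_info=agent_info,
    intermediate_events=[
        vertexai_genai_types.Event(event_id="event1")  # other Event fields elided
    ],
)

# SessionInput seeds the session used to run the agent during evaluation.
session_input = vertexai_genai_types.SessionInput(
    user_id="user1", state={"key": "value"}
)
```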