From 8155ecf4b750056ab707123c8135832983877cef Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 16:34:13 +0000 Subject: [PATCH 01/15] chore(main): release 2.39.2-SNAPSHOT (#2166) :robot: I have created a release *beep* *boop* --- ### Updating meta-information for bleeding-edge SNAPSHOT release. --- This PR was generated with [Release Please](https://togithub.com/googleapis/release-please). See [documentation](https://togithub.com/googleapis/release-please#release-please). --- google-cloud-bigquerystorage-bom/pom.xml | 16 ++++++++-------- google-cloud-bigquerystorage/pom.xml | 4 ++-- grpc-google-cloud-bigquerystorage-v1/pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 4 ++-- pom.xml | 16 ++++++++-------- proto-google-cloud-bigquerystorage-v1/pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 4 ++-- samples/snapshot/pom.xml | 2 +- versions.txt | 14 +++++++------- 11 files changed, 38 insertions(+), 38 deletions(-) diff --git a/google-cloud-bigquerystorage-bom/pom.xml b/google-cloud-bigquerystorage-bom/pom.xml index 1baf525d43..4748c67fbb 100644 --- a/google-cloud-bigquerystorage-bom/pom.xml +++ b/google-cloud-bigquerystorage-bom/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage-bom - 2.39.1 + 2.39.2-SNAPSHOT pom com.google.cloud @@ -52,37 +52,37 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.39.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.39.2-SNAPSHOT com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index 4ed2bae1f8..3344b0fac1 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.39.2-SNAPSHOT jar BigQuery Storage https://github.com/googleapis/java-bigquerystorage @@ -11,7 +11,7 @@ com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT google-cloud-bigquerystorage diff --git a/grpc-google-cloud-bigquerystorage-v1/pom.xml b/grpc-google-cloud-bigquerystorage-v1/pom.xml index cf0432913e..1b5d1352e7 100644 --- a/grpc-google-cloud-bigquerystorage-v1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.39.2-SNAPSHOT grpc-google-cloud-bigquerystorage-v1 GRPC library for grpc-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml index 2edb61c942..259ccae9ec 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.163.2-SNAPSHOT 
grpc-google-cloud-bigquerystorage-v1beta1 GRPC library for grpc-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml index a1ce5951dc..3efd7165e9 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.163.2-SNAPSHOT grpc-google-cloud-bigquerystorage-v1beta2 GRPC library for grpc-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/pom.xml b/pom.xml index 6b2987a9e0..897b439686 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-bigquerystorage-parent pom - 2.39.1 + 2.39.2-SNAPSHOT BigQuery Storage Parent https://github.com/googleapis/java-bigquerystorage @@ -83,37 +83,37 @@ com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.39.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.163.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.39.2-SNAPSHOT com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.39.2-SNAPSHOT org.json diff --git a/proto-google-cloud-bigquerystorage-v1/pom.xml b/proto-google-cloud-bigquerystorage-v1/pom.xml index f2e699a552..2b6a4ae6c8 100644 --- a/proto-google-cloud-bigquerystorage-v1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.39.2-SNAPSHOT proto-google-cloud-bigquerystorage-v1 PROTO library for proto-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml index 8c2c37913d..52e7c44886 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.163.2-SNAPSHOT proto-google-cloud-bigquerystorage-v1beta1 PROTO library for proto-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml index 292fe36092..f676e54044 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.163.2-SNAPSHOT proto-google-cloud-bigquerystorage-v1beta2 PROTO library for proto-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 189dc7a992..15d9bb45a2 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -29,7 +29,7 @@ 
com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.39.2-SNAPSHOT diff --git a/versions.txt b/versions.txt index e13d3db2e6..7195cafbeb 100644 --- a/versions.txt +++ b/versions.txt @@ -1,10 +1,10 @@ # Format: # module:released-version:current-version -google-cloud-bigquerystorage:2.39.1:2.39.1 -grpc-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.1 -grpc-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.1 -grpc-google-cloud-bigquerystorage-v1:2.39.1:2.39.1 -proto-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.1 -proto-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.1 -proto-google-cloud-bigquerystorage-v1:2.39.1:2.39.1 +google-cloud-bigquerystorage:2.39.1:2.39.2-SNAPSHOT +grpc-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.2-SNAPSHOT +grpc-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.2-SNAPSHOT +grpc-google-cloud-bigquerystorage-v1:2.39.1:2.39.2-SNAPSHOT +proto-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.2-SNAPSHOT +proto-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.2-SNAPSHOT +proto-google-cloud-bigquerystorage-v1:2.39.1:2.39.2-SNAPSHOT From 50ca432854851f7cc89cb50a327d9641000b81ee Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 26 Jun 2023 22:14:22 +0200 Subject: [PATCH 02/15] deps: update dependency com.google.cloud:google-cloud-bigquery to v2.29.0 (#2168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [com.google.cloud:google-cloud-bigquery](https://togithub.com/googleapis/java-bigquery) | `2.28.0` -> `2.29.0` | [![age](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquery/2.29.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquery/2.29.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquery/2.29.0/compatibility-slim/2.28.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquery/2.29.0/confidence-slim/2.28.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/java-bigquery ### [`v2.29.0`](https://togithub.com/googleapis/java-bigquery/blob/HEAD/CHANGELOG.md#​2290-httpsgithubcomgoogleapisjava-bigquerycomparev2280v2290-2023-06-23) [Compare Source](https://togithub.com/googleapis/java-bigquery/compare/v2.28.0...v2.29.0) ##### Features - Increase default Read API timeout to 60s ([#​2764](https://togithub.com/googleapis/java-bigquery/issues/2764)) ([f606d0b](https://togithub.com/googleapis/java-bigquery/commit/f606d0b28ca8f65654413a99ab698f35e3befce1)) ##### Dependencies - Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.12.0 ([#​2771](https://togithub.com/googleapis/java-bigquery/issues/2771)) ([7537e0f](https://togithub.com/googleapis/java-bigquery/commit/7537e0f31d8f4696559ef09c7bd284bf78217280)) - Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.23 ([#​2759](https://togithub.com/googleapis/java-bigquery/issues/2759)) ([27ba48a](https://togithub.com/googleapis/java-bigquery/commit/27ba48a0cab331f2d233ba96fed710c11d31dc53)) - Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.23 ([#​2760](https://togithub.com/googleapis/java-bigquery/issues/2760)) ([8cddf8f](https://togithub.com/googleapis/java-bigquery/commit/8cddf8fd286f51cd75aba0da6a52cbc12cab7e2a))
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/googleapis/java-bigquerystorage). --- README.md | 6 +++--- pom.xml | 2 +- samples/install-without-bom/pom.xml | 2 +- samples/snapshot/pom.xml | 2 +- samples/snippets/pom.xml | 2 +- tutorials/JsonWriterDefaultStream/pom.xml | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index ac72bff3f6..e665333b1b 100644 --- a/README.md +++ b/README.md @@ -57,13 +57,13 @@ implementation 'com.google.cloud:google-cloud-bigquerystorage' If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-bigquerystorage:2.39.0' +implementation 'com.google.cloud:google-cloud-bigquerystorage:2.39.1' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "2.39.0" +libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "2.39.1" ``` @@ -220,7 +220,7 @@ Java is a registered trademark of Oracle and/or its affiliates. [kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigquerystorage.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/2.39.0 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/2.39.1 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/pom.xml b/pom.xml index 897b439686..b00f229522 100644 --- a/pom.xml +++ b/pom.xml @@ -132,7 +132,7 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.29.0 test diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index b5a3df7383..8b916f12d3 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -37,7 +37,7 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.29.0 org.apache.avro diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 15d9bb45a2..c036461d94 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ 
-36,7 +36,7 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.29.0 org.apache.avro diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index b9229ebb88..e2d2144191 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -48,7 +48,7 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.29.0 org.apache.avro diff --git a/tutorials/JsonWriterDefaultStream/pom.xml b/tutorials/JsonWriterDefaultStream/pom.xml index 7be2863855..9dcd50faf0 100644 --- a/tutorials/JsonWriterDefaultStream/pom.xml +++ b/tutorials/JsonWriterDefaultStream/pom.xml @@ -24,7 +24,7 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.29.0 org.apache.avro From 8a51fae180ced3b362acc350999157d3d6e0da6a Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 28 Jun 2023 20:52:12 +0200 Subject: [PATCH 03/15] deps: update dependency com.google.auto.value:auto-value-annotations to v1.10.2 (#2172) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [com.google.auto.value:auto-value-annotations](https://togithub.com/google/auto/tree/main/value) ([source](https://togithub.com/google/auto)) | `1.10.1` -> `1.10.2` | [![age](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value-annotations/1.10.2/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value-annotations/1.10.2/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value-annotations/1.10.2/compatibility-slim/1.10.1)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value-annotations/1.10.2/confidence-slim/1.10.1)](https://docs.renovatebot.com/merge-confidence/) | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/googleapis/java-bigquerystorage). 
--- README.md | 2 +- google-cloud-bigquerystorage/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e665333b1b..651518c7e6 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.17.0') +implementation platform('com.google.cloud:libraries-bom:26.18.0') implementation 'com.google.cloud:google-cloud-bigquerystorage' ``` diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index 3344b0fac1..89a9d8b2be 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -80,7 +80,7 @@ com.google.auto.value auto-value-annotations - 1.10.1 + 1.10.2 com.google.protobuf From 1b94f9748f42f8d20c4200ee2011c8f1a05562e4 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 28 Jun 2023 20:52:17 +0200 Subject: [PATCH 04/15] chore(deps): update dependency com.google.cloud:google-cloud-bigquerystorage to v2.39.1 (#2167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [com.google.cloud:google-cloud-bigquerystorage](https://togithub.com/googleapis/java-bigquerystorage) | `2.39.0` -> `2.39.1` | [![age](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquerystorage/2.39.1/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquerystorage/2.39.1/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquerystorage/2.39.1/compatibility-slim/2.39.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/maven/com.google.cloud:google-cloud-bigquerystorage/2.39.1/confidence-slim/2.39.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/java-bigquerystorage ### [`v2.39.1`](https://togithub.com/googleapis/java-bigquerystorage/blob/HEAD/CHANGELOG.md#​2391-httpsgithubcomgoogleapisjava-bigquerystoragecomparev2390v2391-2023-06-22) [Compare Source](https://togithub.com/googleapis/java-bigquerystorage/compare/v2.39.0...v2.39.1) ##### Dependencies - Update dependency com.google.cloud:google-cloud-bigquery to v2.27.1 ([#​2150](https://togithub.com/googleapis/java-bigquerystorage/issues/2150)) ([da736a6](https://togithub.com/googleapis/java-bigquerystorage/commit/da736a65378d007930e0afb9246d0f53bb41e0c3)) - Update dependency com.google.cloud:google-cloud-bigquery to v2.28.0 ([#​2163](https://togithub.com/googleapis/java-bigquerystorage/issues/2163)) ([088219e](https://togithub.com/googleapis/java-bigquerystorage/commit/088219effe0528df7c998c6e71adc62025d3b204)) - Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.12.0 ([#​2160](https://togithub.com/googleapis/java-bigquerystorage/issues/2160)) ([069165a](https://togithub.com/googleapis/java-bigquerystorage/commit/069165aa9e3644ae84bcffa501bee175623ee9b3)) - Update dependency com.google.http-client:google-http-client to v1.43.3 ([#​2156](https://togithub.com/googleapis/java-bigquerystorage/issues/2156)) ([814e826](https://togithub.com/googleapis/java-bigquerystorage/commit/814e8261689351bc88202be37975e78415192de5)) - Update dependency com.google.truth:truth to v1.1.5 ([#​2153](https://togithub.com/googleapis/java-bigquerystorage/issues/2153)) ([bf9f1da](https://togithub.com/googleapis/java-bigquerystorage/commit/bf9f1da8a3de27a775e7c3a58076dd06b026c459)) - Update dependency org.json:json to v20230618 ([#​2154](https://togithub.com/googleapis/java-bigquerystorage/issues/2154)) ([8e7b42a](https://togithub.com/googleapis/java-bigquerystorage/commit/8e7b42aeac292b3291a505e2a456499553951a8f))
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/googleapis/java-bigquerystorage). --- README.md | 2 +- samples/install-without-bom/pom.xml | 2 +- tutorials/JsonWriterDefaultStream/pom.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 651518c7e6..7247653874 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ If you are using Maven without the BOM, add this to your dependencies: com.google.cloud google-cloud-bigquerystorage - 2.39.0 + 2.39.1 ``` diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 8b916f12d3..4fe8193bf3 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -30,7 +30,7 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.0 + 2.39.1 diff --git a/tutorials/JsonWriterDefaultStream/pom.xml b/tutorials/JsonWriterDefaultStream/pom.xml index 9dcd50faf0..429e39be02 100644 --- a/tutorials/JsonWriterDefaultStream/pom.xml +++ b/tutorials/JsonWriterDefaultStream/pom.xml @@ -19,7 +19,7 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.0 + 2.39.1 com.google.cloud From c7e5ef6eb93f27c8a104492835b5b93013eb76a3 Mon Sep 17 00:00:00 2001 From: Tomo Suzuki Date: Wed, 28 Jun 2023 15:46:52 -0400 Subject: [PATCH 05/15] ci: javadoc job (JDK 17) in ci.yaml (#2173) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: javadoc job (JDK 17) in ci.yaml This also changes the JDK distribution from zulu to temurin https://github.com/actions/setup-java#eclipse-temurin * fix javadoc * javadoc fix * lint fix * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .github/workflows/ci.yaml | 22 ++++++++++++++---- .../bigquery/storage/v1/CivilTimeEncoder.java | 1 + .../bigquery/storage/v1/ConnectionWorker.java | 3 ++- .../cloud/bigquery/storage/v1/Exceptions.java | 14 ++++++----- .../bigquery/storage/v1/JsonStreamWriter.java | 8 +++---- .../storage/v1/SchemaAwareStreamWriter.java | 8 +++---- .../bigquery/storage/v1/StreamWriter.java | 7 +++--- .../storage/v1beta2/CivilTimeEncoder.java | 1 + .../storage/v1beta2/JsonStreamWriter.java | 8 +++---- .../storage/v1/JsonStreamWriterTest.java | 6 +++-- .../storage/v1/MockBigQueryWriteImpl.java | 3 ++- .../it/ITBigQueryWriteManualClientTest.java | 12 ++++++---- .../v1beta1/MockBigQueryStorageImpl.java | 3 ++- .../v1beta2/MockBigQueryWriteImpl.java | 3 ++- .../bigquery/storage/v1beta1/ReadOptions.java | 23 +++++++++++++++++++ .../bigquery/storage/v1beta1/Storage.java | 10 ++++++++ .../bigquery/storage/v1beta2/StreamProto.java | 4 ++-- 17 files changed, 98 insertions(+), 38 deletions(-) diff --git a/.github/workflows/ci.yaml 
b/.github/workflows/ci.yaml index 53ebea9e6a..01a5ce1602 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: ${{matrix.java}} - run: java -version - run: .kokoro/build.sh @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: 8 - run: java -version - run: .kokoro/build.bat @@ -66,17 +66,29 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: ${{matrix.java}} - run: java -version - run: .kokoro/dependencies.sh + javadoc: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-java@v3 + with: + distribution: temurin + java-version: 17 + - run: java -version + - run: .kokoro/build.sh + env: + JOB_TYPE: javadoc lint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: 11 - run: java -version - run: .kokoro/build.sh @@ -88,7 +100,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: 8 - run: java -version - run: .kokoro/build.sh diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java index be2d1fd929..e52ada64d8 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java @@ -30,6 +30,7 @@ *

The valid range and number of bits required by each date/time field is as the following: * * + * * * * diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java index 7e86da4d81..1f0e1e1989 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java @@ -843,7 +843,8 @@ private void requestCallback(AppendRowsResponse response) { log.fine( String.format( - "Got response with schema updated (omitting updated schema in response here): %s writer id %s", + "Got response with schema updated (omitting updated schema in response here): %s" + + " writer id %s", responseWithUpdatedSchemaRemoved.toString(), writerId)); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java index 2f9083e4e9..fc4d4d8268 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java @@ -243,10 +243,10 @@ public String getStreamName() { } /** - * This exception is thrown from {@link SchemaAwareStreamWriter#append()} when the client side - * Proto serialization fails. It can also be thrown by the server in case rows contains invalid - * data. The exception contains a Map of indexes of faulty rows and the corresponding error - * message. + * This exception is thrown from {@link SchemaAwareStreamWriter#append(Iterable)} when the client + * side Proto serialization fails. It can also be thrown by the server in case rows contains + * invalid data. The exception contains a Map of indexes of faulty rows and the corresponding + * error message. */ public static class AppendSerializationError extends AppendSerializtionError { @@ -344,7 +344,8 @@ protected InflightRequestsLimitExceededException(String writerId, long currentLi super( Status.fromCode(Status.Code.RESOURCE_EXHAUSTED) .withDescription( - "Exceeds client side inflight buffer, consider add more buffer or open more connections. Current limit: " + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections. Current limit: " + currentLimit), writerId, currentLimit); @@ -356,7 +357,8 @@ protected InflightBytesLimitExceededException(String writerId, long currentLimit super( Status.fromCode(Status.Code.RESOURCE_EXHAUSTED) .withDescription( - "Exceeds client side inflight buffer, consider add more buffer or open more connections. Current limit: " + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections. 
Current limit: " + currentLimit), writerId, currentLimit); diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java index 0d47910e7b..79d2582a89 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java @@ -54,8 +54,8 @@ private JsonStreamWriter(SchemaAwareStreamWriter.Builder builder) * is created with the updated TableSchema. * * @param jsonArr The JSON array that contains JSONObjects to be written - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr) throws IOException, Descriptors.DescriptorValidationException { @@ -70,8 +70,8 @@ public ApiFuture append(JSONArray jsonArr) * * @param jsonArr The JSON array that contains JSONObjects to be written * @param offset Offset for deduplication - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr, long offset) throws IOException, Descriptors.DescriptorValidationException { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java index 10fceeee68..cbe5d63478 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java @@ -101,8 +101,8 @@ private SchemaAwareStreamWriter(Builder builder) * created with the updated TableSchema. 
* * @param items The array that contains objects to be written - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(Iterable items) throws IOException, DescriptorValidationException { @@ -158,8 +158,8 @@ private Message buildMessage(T item) * * @param items The collection that contains objects to be written * @param offset Offset for deduplication - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(Iterable items, long offset) throws IOException, DescriptorValidationException { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java index 6ab9346da5..d6f0f99ca9 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java @@ -247,7 +247,8 @@ private StreamWriter(Builder builder) throws IOException { String fetchedLocation = writeStream.getLocation(); log.info( String.format( - "Fethed location %s for stream name %s, extracted project and dataset name: %s\"", + "Fethed location %s for stream name %s, extracted project and dataset" + + " name: %s\"", fetchedLocation, streamName, datasetAndProjectName)); return fetchedLocation; }); @@ -494,12 +495,12 @@ public void close() { singleConnectionOrConnectionPool.close(this); } - /** Constructs a new {@link StreamWriterV2.Builder} using the given stream and client. */ + /** Constructs a new {@link StreamWriter.Builder} using the given stream and client. */ public static StreamWriter.Builder newBuilder(String streamName, BigQueryWriteClient client) { return new StreamWriter.Builder(streamName, client); } - /** Constructs a new {@link StreamWriterV2.Builder} using the given stream. */ + /** Constructs a new {@link StreamWriter.Builder} using the given stream. */ public static StreamWriter.Builder newBuilder(String streamName) { return new StreamWriter.Builder(streamName); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java index 1ab8d9eb17..1ff98117b0 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java @@ -30,6 +30,7 @@ *

The valid range and number of bits required by each date/time field is as the following:

Range and bits for date/time fields
Field | Range     | #Bits
Year  | [1, 9999] | 14
Month | [1, 12]   | 4
+ * * * * diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java index 5837e5c4a5..a86a35b7ab 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java @@ -83,8 +83,8 @@ private JsonStreamWriter(Builder builder) * data to protobuf messages, then using StreamWriter's append() to write the data. * * @param jsonArr The JSON array that contains JSONObjects to be written - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr) { return append(jsonArr, -1); @@ -96,8 +96,8 @@ public ApiFuture append(JSONArray jsonArr) { * * @param jsonArr The JSON array that contains JSONObjects to be written * @param offset Offset for deduplication - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr, long offset) { ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java index eed96886a4..a6ad2df000 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java @@ -1280,7 +1280,8 @@ public void run() throws Throwable { ex.getStatus() .getDescription() .contains( - "Exceeds client side inflight buffer, consider add more buffer or open more connections")); + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections")); } } @@ -1342,7 +1343,8 @@ public void testMultipleAppendSerializationErrors() "The source object has fields unknown to BigQuery: root.not_foo.", rowIndexToErrorMessage.get(0)); assertEquals( - "Field root.foo failed to convert to STRING. Error: JSONObject does not have a string field at root.foo.", + "Field root.foo failed to convert to STRING. Error: JSONObject does not have a string" + + " field at root.foo.", rowIndexToErrorMessage.get(2)); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java index cadc196f5c..f10e9f3a9d 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java @@ -174,7 +174,8 @@ public void batchCommitWriteStreams( responseObserver.onError( new IllegalArgumentException( String.format( - "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s", + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or" + + " %s", response == null ? 
"null" : response.getClass().getName(), BatchCommitWriteStreamsResponse.class.getName(), Exception.class.getName()))); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java index 1e73643eb8..c29c03d178 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java @@ -376,13 +376,15 @@ public void testRowErrors() AppendSerializationError e = (AppendSerializationError) t; LOG.info("Found row errors on stream: " + e.getStreamName()); assertEquals( - "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field foo.", + "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field" + + " foo.", e.getRowIndexToErrorMessage().get(0)); assertEquals( "Timestamp field value is out of range: -9223372036854775808 on field bar.", e.getRowIndexToErrorMessage().get(1)); assertEquals( - "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field foo.", + "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field" + + " foo.", e.getRowIndexToErrorMessage().get(2)); for (Map.Entry entry : e.getRowIndexToErrorMessage().entrySet()) { LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue()); @@ -1035,10 +1037,12 @@ public void testComplicateSchemaWithPendingStream() Iterator queryIter = queryResult.getValues().iterator(); assertTrue(queryIter.hasNext()); assertEquals( - "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=aaa}, FieldValue{attribute=PRIMITIVE, value=aaa}]}]", + "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=aaa}," + + " FieldValue{attribute=PRIMITIVE, value=aaa}]}]", queryIter.next().get(1).getRepeatedValue().toString()); assertEquals( - "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=bbb}, FieldValue{attribute=PRIMITIVE, value=bbb}]}]", + "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=bbb}," + + " FieldValue{attribute=PRIMITIVE, value=bbb}]}]", queryIter.next().get(1).getRepeatedValue().toString()); assertFalse(queryIter.hasNext()); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java index b1d6e58aae..889be3bbb8 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java @@ -117,7 +117,8 @@ public void batchCreateReadSessionStreams( responseObserver.onError( new IllegalArgumentException( String.format( - "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected %s or %s", + "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected" + + " %s or %s", response == null ? 
"null" : response.getClass().getName(), Storage.BatchCreateReadSessionStreamsResponse.class.getName(), Exception.class.getName()))); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java index 098a1e7fa4..814d5b73ff 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java @@ -174,7 +174,8 @@ public void batchCommitWriteStreams( responseObserver.onError( new IllegalArgumentException( String.format( - "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s", + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or" + + " %s", response == null ? "null" : response.getClass().getName(), BatchCommitWriteStreamsResponse.class.getName(), Exception.class.getName()))); diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java index 9c058789f6..f0070660b2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java @@ -90,6 +90,7 @@ public interface TableReadOptionsOrBuilder * @return A list containing the selectedFields. */ java.util.List getSelectedFieldsList(); + /** * * @@ -148,6 +149,7 @@ public interface TableReadOptionsOrBuilder * @return The count of selectedFields. */ int getSelectedFieldsCount(); + /** * * @@ -207,6 +209,7 @@ public interface TableReadOptionsOrBuilder * @return The selectedFields at the given index. */ java.lang.String getSelectedFields(int index); + /** * * @@ -288,6 +291,7 @@ public interface TableReadOptionsOrBuilder * @return The rowRestriction. 
*/ java.lang.String getRowRestriction(); + /** * * @@ -420,6 +424,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { return selectedFields_; } + /** * * @@ -480,6 +485,7 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { public int getSelectedFieldsCount() { return selectedFields_.size(); } + /** * * @@ -541,6 +547,7 @@ public int getSelectedFieldsCount() { public java.lang.String getSelectedFields(int index) { return selectedFields_.get(index); } + /** * * @@ -607,6 +614,7 @@ public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { @SuppressWarnings("serial") private volatile java.lang.Object rowRestriction_ = ""; + /** * * @@ -639,6 +647,7 @@ public java.lang.String getRowRestriction() { return s; } } + /** * * @@ -1078,6 +1087,7 @@ private void ensureSelectedFieldsIsMutable() { } bitField0_ |= 0x00000001; } + /** * * @@ -1139,6 +1149,7 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { selectedFields_.makeImmutable(); return selectedFields_; } + /** * * @@ -1199,6 +1210,7 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { public int getSelectedFieldsCount() { return selectedFields_.size(); } + /** * * @@ -1260,6 +1272,7 @@ public int getSelectedFieldsCount() { public java.lang.String getSelectedFields(int index) { return selectedFields_.get(index); } + /** * * @@ -1321,6 +1334,7 @@ public java.lang.String getSelectedFields(int index) { public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { return selectedFields_.getByteString(index); } + /** * * @@ -1390,6 +1404,7 @@ public Builder setSelectedFields(int index, java.lang.String value) { onChanged(); return this; } + /** * * @@ -1458,6 +1473,7 @@ public Builder addSelectedFields(java.lang.String value) { onChanged(); return this; } + /** * * @@ -1523,6 +1539,7 @@ public Builder addAllSelectedFields(java.lang.Iterable values) onChanged(); return this; } + /** * * @@ -1587,6 +1604,7 @@ public Builder clearSelectedFields() { onChanged(); return this; } + /** * * @@ -1658,6 +1676,7 @@ public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { } private java.lang.Object rowRestriction_ = ""; + /** * * @@ -1689,6 +1708,7 @@ public java.lang.String getRowRestriction() { return (java.lang.String) ref; } } + /** * * @@ -1720,6 +1740,7 @@ public com.google.protobuf.ByteString getRowRestrictionBytes() { return (com.google.protobuf.ByteString) ref; } } + /** * * @@ -1750,6 +1771,7 @@ public Builder setRowRestriction(java.lang.String value) { onChanged(); return this; } + /** * * @@ -1776,6 +1798,7 @@ public Builder clearRowRestriction() { onChanged(); return this; } + /** * * diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java index 8713b6a98f..a695486fc4 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java @@ -12454,6 +12454,7 @@ public Builder clearStatus() { : status_; } } + /** * * @@ -12486,6 +12487,7 @@ public Builder clearStatus() { com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder, 
com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder> throttleStatusBuilder_; + /** * * @@ -12501,6 +12503,7 @@ public Builder clearStatus() { public boolean hasThrottleStatus() { return ((bitField0_ & 0x00000010) != 0); } + /** * * @@ -12523,6 +12526,7 @@ public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus getThrot return throttleStatusBuilder_.getMessage(); } } + /** * * @@ -12547,6 +12551,7 @@ public Builder setThrottleStatus( onChanged(); return this; } + /** * * @@ -12569,6 +12574,7 @@ public Builder setThrottleStatus( onChanged(); return this; } + /** * * @@ -12598,6 +12604,7 @@ public Builder mergeThrottleStatus( onChanged(); return this; } + /** * * @@ -12618,6 +12625,7 @@ public Builder clearThrottleStatus() { onChanged(); return this; } + /** * * @@ -12634,6 +12642,7 @@ public Builder clearThrottleStatus() { onChanged(); return getThrottleStatusFieldBuilder().getBuilder(); } + /** * * @@ -12655,6 +12664,7 @@ public Builder clearThrottleStatus() { : throttleStatus_; } } + /** * * diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java index 1fd91308f5..e9d0a925a8 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java @@ -56,8 +56,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { - "\n2google/cloud/bigquery/storage/v1beta2/" - + "stream.proto\022%google.cloud.bigquery.stor" + "\n" + + "2google/cloud/bigquery/storage/v1beta2/stream.proto\022%google.cloud.bigquery.stor" + "age.v1beta2\032\037google/api/field_behavior.p" + "roto\032\031google/api/resource.proto\0321google/" + "cloud/bigquery/storage/v1beta2/arrow.pro" From 9255630f1e631e48d17190d0d3b5b43abfc694c8 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 29 Jun 2023 16:16:18 +0200 Subject: [PATCH 06/15] chore(deps): update dependency com.google.cloud:libraries-bom to v26.18.0 (#2174) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [com.google.cloud:libraries-bom](https://cloud.google.com/java/docs/bom) ([source](https://togithub.com/googleapis/java-cloud-bom)) | `26.17.0` -> `26.18.0` | [![age](https://badges.renovateapi.com/packages/maven/com.google.cloud:libraries-bom/26.18.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/maven/com.google.cloud:libraries-bom/26.18.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://badges.renovateapi.com/packages/maven/com.google.cloud:libraries-bom/26.18.0/compatibility-slim/26.17.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/maven/com.google.cloud:libraries-bom/26.18.0/confidence-slim/26.17.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/java-cloud-bom (com.google.cloud:libraries-bom) ### [`v26.18.0`](https://togithub.com/googleapis/java-cloud-bom/blob/HEAD/CHANGELOG.md#​26180-httpsgithubcomgoogleapisjava-cloud-bomcomparev26170v26180-2023-06-28) ##### Dependencies - update dependency com.google.cloud:first-party-dependencies to v3.12.0 ([#​6057](https://togithub.com/googleapis/java-cloud-bom/issues/6057)) ([4c52805](https://togithub.com/googleapis/java-cloud-bom/commit/4c52805563963c5f03d75542d83a86785ddde2eb)) - update dependency com.google.cloud:gapic-libraries-bom to v1.14.0 ([#​6067](https://togithub.com/googleapis/java-cloud-bom/issues/6067)) ([2ba5fb8](https://togithub.com/googleapis/java-cloud-bom/commit/2ba5fb8a1a5de8ab5f925313e68128cb914b5e6b)) - update dependency com.google.cloud:google-cloud-bigquery to v2.29.0 ([#​6051](https://togithub.com/googleapis/java-cloud-bom/issues/6051)) ([f0e719c](https://togithub.com/googleapis/java-cloud-bom/commit/f0e719cbe972e09a9b75d0f5841122518123248a)) - update dependency com.google.cloud:google-cloud-bigquerystorage-bom to v2.39.1 ([#​6058](https://togithub.com/googleapis/java-cloud-bom/issues/6058)) ([795e555](https://togithub.com/googleapis/java-cloud-bom/commit/795e5550a97e9724bfc777a7295e962ea6ebedf5)) - update dependency com.google.cloud:google-cloud-bigtable-bom to v2.24.1 ([#​6076](https://togithub.com/googleapis/java-cloud-bom/issues/6076)) ([47b8b96](https://togithub.com/googleapis/java-cloud-bom/commit/47b8b96e0660434628cdcdd68ea9b66d38bf51a0)) - update dependency com.google.cloud:google-cloud-datastore-bom to v2.16.0 ([#​6066](https://togithub.com/googleapis/java-cloud-bom/issues/6066)) ([4b3b665](https://togithub.com/googleapis/java-cloud-bom/commit/4b3b66506c4f7ea53f790d5e95ca6f687ec2c512)) - update dependency com.google.cloud:google-cloud-firestore-bom to v3.13.2 ([#​6052](https://togithub.com/googleapis/java-cloud-bom/issues/6052)) ([dfd0068](https://togithub.com/googleapis/java-cloud-bom/commit/dfd00685d239bf5cd6613221874fcac02f0e2eaf)) - update dependency com.google.cloud:google-cloud-logging-bom to v3.15.5 ([#​6055](https://togithub.com/googleapis/java-cloud-bom/issues/6055)) ([23ba420](https://togithub.com/googleapis/java-cloud-bom/commit/23ba420a4a1eaa4baed11161871278311ff6e4dd)) - update dependency com.google.cloud:google-cloud-logging-logback to v0.130.17-alpha ([#​6061](https://togithub.com/googleapis/java-cloud-bom/issues/6061)) ([5182d43](https://togithub.com/googleapis/java-cloud-bom/commit/5182d43e53a1dfda0776f1f9559620d643bfedca)) - update dependency com.google.cloud:google-cloud-nio to v0.126.18 
([#​6060](https://togithub.com/googleapis/java-cloud-bom/issues/6060)) ([9bbf047](https://togithub.com/googleapis/java-cloud-bom/commit/9bbf047c93ca1f6929e79eb5ceee29f2f0acb9d2)) - update dependency com.google.cloud:google-cloud-pubsub-bom to v1.123.15 ([#​6059](https://togithub.com/googleapis/java-cloud-bom/issues/6059)) ([7d5eecd](https://togithub.com/googleapis/java-cloud-bom/commit/7d5eecda25b4270dfe0add8adabc277f8d6c5a6e)) - update dependency com.google.cloud:google-cloud-pubsub-bom to v1.123.16 ([#​6070](https://togithub.com/googleapis/java-cloud-bom/issues/6070)) ([9a627d4](https://togithub.com/googleapis/java-cloud-bom/commit/9a627d49381dd10304965ad080d270e66650965a)) - update dependency com.google.cloud:google-cloud-pubsub-bom to v1.123.17 ([#​6073](https://togithub.com/googleapis/java-cloud-bom/issues/6073)) ([3f3a096](https://togithub.com/googleapis/java-cloud-bom/commit/3f3a0967049de9f401afa6c4cb5c306fe7dd7025)) - update dependency com.google.cloud:google-cloud-pubsublite-bom to v1.12.10 ([#​6077](https://togithub.com/googleapis/java-cloud-bom/issues/6077)) ([8ef9b9c](https://togithub.com/googleapis/java-cloud-bom/commit/8ef9b9c773052a9d9a3d07f4877e0a31213417f6)) - update dependency com.google.cloud:google-cloud-pubsublite-bom to v1.12.9 ([#​6065](https://togithub.com/googleapis/java-cloud-bom/issues/6065)) ([d1005e9](https://togithub.com/googleapis/java-cloud-bom/commit/d1005e9b3c4ffa21ceead5e923e020935f1eaa58)) - update dependency com.google.cloud:google-cloud-spanner-bom to v6.43.1 ([#​6071](https://togithub.com/googleapis/java-cloud-bom/issues/6071)) ([9e51be0](https://togithub.com/googleapis/java-cloud-bom/commit/9e51be059519912d5703288efedfa02d876fee06)) - update dependency com.google.cloud:google-cloud-spanner-jdbc to v2.11.1 ([#​6072](https://togithub.com/googleapis/java-cloud-bom/issues/6072)) ([05565e8](https://togithub.com/googleapis/java-cloud-bom/commit/05565e8ead5f86451bdcc911953219fa9e18ca57)) - update dependency com.google.cloud:google-cloud-spanner-jdbc to v2.11.2 ([#​6074](https://togithub.com/googleapis/java-cloud-bom/issues/6074)) ([236016b](https://togithub.com/googleapis/java-cloud-bom/commit/236016bf309a9e4e1d657d58cf19608ec4b06a05)) - update dependency com.google.cloud:google-cloud-storage-bom to v2.22.5 ([#​6064](https://togithub.com/googleapis/java-cloud-bom/issues/6064)) ([2315aeb](https://togithub.com/googleapis/java-cloud-bom/commit/2315aeb00346bb56a26d53178917f17df7644525)) - update dependency com.google.cloud.tools:dependencies to v1.5.13 
([#​6046](https://togithub.com/googleapis/java-cloud-bom/issues/6046)) ([72382c3](https://togithub.com/googleapis/java-cloud-bom/commit/72382c3db2833da664363bc9cbdea9f22fe60c19))
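For orientation, the change below only bumps the `libraries-bom` import in the README and the sample `pom.xml` from 26.17.0 to 26.18.0; no application code changes are required. As a minimal, hypothetical sketch of what that BOM-managed dependency is typically used for (the project, dataset, table, and column names are placeholders, and the JSON key is assumed to match the destination table's schema), appending JSON rows through the client looks roughly like this:

```java
import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
import com.google.cloud.bigquery.storage.v1.TableName;
import org.json.JSONArray;
import org.json.JSONObject;

public class WriteToDefaultStreamSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder identifiers; substitute a real project, dataset, and table.
    TableName table = TableName.of("my-project", "my_dataset", "my_table");
    try (BigQueryWriteClient client = BigQueryWriteClient.create();
        // Targets the table's default stream; the writer fetches the table schema itself.
        JsonStreamWriter writer =
            JsonStreamWriter.newBuilder(table.toString(), client).build()) {
      JSONArray rows = new JSONArray();
      // "test_string" is a placeholder column name.
      rows.put(new JSONObject().put("test_string", "hello"));
      // append() converts the JSON rows to protobuf messages and sends them;
      // get() blocks until the server acknowledges the append.
      AppendRowsResponse response = writer.append(rows).get();
      System.out.println("Append acknowledged: " + response.hasAppendResult());
    }
  }
}
```

The BOM keeps `google-cloud-bigquerystorage` and its transitive gRPC and proto artifacts on mutually compatible versions, so a snippet like the one above does not need to pin any individual client version.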
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/googleapis/java-bigquerystorage). --- README.md | 2 +- samples/snippets/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7247653874..9bf5e6afb5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: com.google.cloud libraries-bom - 26.17.0 + 26.18.0 pom import diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index e2d2144191..936fd3901b 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -31,7 +31,7 @@ com.google.cloud libraries-bom - 26.17.0 + 26.18.0 pom import From 721908d412f1d82aff9aed8edcf727fc5b1bf950 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 29 Jun 2023 16:18:13 +0200 Subject: [PATCH 07/15] deps: update dependency com.google.auto.value:auto-value to v1.10.2 (#2171) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [com.google.auto.value:auto-value](https://togithub.com/google/auto/tree/main/value) ([source](https://togithub.com/google/auto)) | `1.10.1` -> `1.10.2` | [![age](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value/1.10.2/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value/1.10.2/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value/1.10.2/compatibility-slim/1.10.1)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/maven/com.google.auto.value:auto-value/1.10.2/confidence-slim/1.10.1)](https://docs.renovatebot.com/merge-confidence/) | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. 
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/googleapis/java-bigquerystorage). --- google-cloud-bigquerystorage/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index 89a9d8b2be..ccded10323 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -75,7 +75,7 @@ com.google.auto.value auto-value - 1.10.1 + 1.10.2 com.google.auto.value From 494ce8513e8925b4330a2bf45641ba38db625c1d Mon Sep 17 00:00:00 2001 From: Siddharth Agrawal Date: Fri, 30 Jun 2023 08:38:39 -0700 Subject: [PATCH 08/15] fix: support DATETIME field that has a space between date and time and has only date (#2176) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: support DATETIME field that has a space between date and time and has only date. * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../storage/v1/JsonToProtoMessage.java | 25 ++++++++++- .../storage/v1/JsonToProtoMessageTest.java | 42 ++++++++++++++++++- 2 files changed, 63 insertions(+), 4 deletions(-) diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java index 3d1e1e0b5d..f4b77151e4 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java @@ -100,6 +100,25 @@ public class JsonToProtoMessage implements ToProtoConverter { .toFormatter() .withZone(ZoneOffset.UTC); + private static final DateTimeFormatter DATETIME_FORMATTER = + new DateTimeFormatterBuilder() + .parseLenient() + .append(DateTimeFormatter.ISO_LOCAL_DATE) + .optionalStart() + .optionalStart() + .parseCaseInsensitive() + .appendLiteral('T') + .optionalEnd() + .optionalStart() + .appendLiteral(' ') + .optionalEnd() + .append(DateTimeFormatter.ISO_LOCAL_TIME) + .optionalEnd() + .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .toFormatter(); + /** You can use {@link #INSTANCE} instead */ public JsonToProtoMessage() {} @@ -402,7 +421,8 @@ private void fillField( if (val instanceof String) { protoMsg.setField( fieldDescriptor, - CivilTimeEncoder.encodePacked64DatetimeMicros(LocalDateTime.parse((String) val))); + CivilTimeEncoder.encodePacked64DatetimeMicros( + LocalDateTime.parse((String) val, DATETIME_FORMATTER))); return; } else if (val instanceof Long) { protoMsg.setField(fieldDescriptor, val); @@ -663,7 +683,8 @@ private void fillRepeatedField( if (val instanceof String) { protoMsg.addRepeatedField( fieldDescriptor, - 
CivilTimeEncoder.encodePacked64DatetimeMicros(LocalDateTime.parse((String) val))); + CivilTimeEncoder.encodePacked64DatetimeMicros( + LocalDateTime.parse((String) val, DATETIME_FORMATTER))); } else if (val instanceof Long) { protoMsg.addRepeatedField(fieldDescriptor, val); } else { diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java index 5c44d014d4..5ee64ab115 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java @@ -608,7 +608,7 @@ public void testDateTimeMismatch() throws Exception { TableFieldSchema.newBuilder() .setName("datetime") .setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.REPEATED) + .setMode(TableFieldSchema.Mode.NULLABLE) .build(); TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); JSONObject json = new JSONObject(); @@ -623,6 +623,34 @@ public void testDateTimeMismatch() throws Exception { } } + private void dateTimeMatch_Internal(String jsonVal, Long expectedVal) throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("datetime") + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + TestDatetime expectedProto = TestDatetime.newBuilder().setDatetime(expectedVal).build(); + JSONObject json = new JSONObject(); + json.put("datetime", jsonVal); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestDatetime.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + public void testDateTimeMatch() throws Exception { + dateTimeMatch_Internal("2021-09-27T20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27t20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27 20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-9-27T20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27T00:00:00", 142258525253402624L); + dateTimeMatch_Internal("2021-09-27T00:0:00", 142258525253402624L); + dateTimeMatch_Internal("2021-09-27", 142258525253402624L); + } + @Test public void testTimeMismatch() throws Exception { TableFieldSchema field = @@ -952,6 +980,9 @@ public void testStructComplex() throws Exception { .setTestDate(1) .setTestDatetime(1) .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258525253402624L) .addTestDatetimeStr(142258525253402624L) .setComplexLvl1( ComplexLvl1.newBuilder() @@ -1020,7 +1051,14 @@ public void testStructComplex() throws Exception { json.put("test_datetime", 1); json.put( "test_datetime_str", - new JSONArray(new String[] {"2021-09-27T20:51:10.752", "2021-09-27T00:00:00"})); + new JSONArray( + new String[] { + "2021-09-27T20:51:10.752", + "2021-09-27t20:51:10.752", + "2021-09-27 20:51:10.752", + "2021-09-27T00:00:00", + "2021-09-27" + })); json.put("complex_lvl1", complex_lvl1); json.put("complex_lvl2", complex_lvl2); json.put( From e5bb5d099ea0272c4bd447b7f8fef5207c14ffc5 Mon Sep 17 00:00:00 2001 From: Siddharth Agrawal Date: Mon, 10 Jul 2023 09:39:42 -0700 Subject: [PATCH 
09/15] fix: interpret Integer and Float values for TIMESTAMP as microseconds (#2175) --- .../storage/v1/JsonToProtoMessage.java | 8 +- .../storage/v1/JsonToProtoMessageTest.java | 79 +++++++++++++++++-- .../src/test/proto/jsonTest.proto | 11 +++ 3 files changed, 89 insertions(+), 9 deletions(-) diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java index f4b77151e4..52faf55742 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java @@ -442,7 +442,7 @@ private void fillField( if (val instanceof String) { Double parsed = Doubles.tryParse((String) val); if (parsed != null) { - protoMsg.setField(fieldDescriptor, parsed.longValue() * 10000000); + protoMsg.setField(fieldDescriptor, parsed.longValue()); return; } TemporalAccessor parsedTime = TIMESTAMP_FORMATTER.parse((String) val); @@ -455,7 +455,7 @@ private void fillField( protoMsg.setField(fieldDescriptor, val); return; } else if (val instanceof Integer) { - protoMsg.setField(fieldDescriptor, Long.valueOf((Integer) val) * 10000000); + protoMsg.setField(fieldDescriptor, Long.valueOf((Integer) val)); return; } } @@ -705,7 +705,7 @@ private void fillRepeatedField( if (val instanceof String) { Double parsed = Doubles.tryParse((String) val); if (parsed != null) { - protoMsg.addRepeatedField(fieldDescriptor, parsed.longValue() * 10000000); + protoMsg.addRepeatedField(fieldDescriptor, parsed.longValue()); } else { TemporalAccessor parsedTime = TIMESTAMP_FORMATTER.parse((String) val); protoMsg.addRepeatedField( @@ -716,7 +716,7 @@ private void fillRepeatedField( } else if (val instanceof Long) { protoMsg.addRepeatedField(fieldDescriptor, val); } else if (val instanceof Integer) { - protoMsg.addRepeatedField(fieldDescriptor, ((Integer) val) * 10000000); + protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Integer) val)); } else { throwWrongFieldType(fieldDescriptor, currentScope, index); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java index 5ee64ab115..d6da83275d 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java @@ -393,6 +393,12 @@ public class JsonToProtoMessageTest { .setMode(TableFieldSchema.Mode.NULLABLE) .setName("test_timestamp") .build(); + private final TableFieldSchema TEST_TIMESTAMP_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_repeated") + .build(); private final TableFieldSchema TEST_TIME = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.TIME) @@ -787,9 +793,9 @@ public void testTimestamp() throws Exception { TestTimestamp.newBuilder() .setTestString(10L) .setTestStringTZ(1648493279010000L) - .setTestLong(0L) - .setTestInt(1534806950000000L) - .setTestFloat(1534680695000000000L) + .setTestLong(1687984085000000L) + .setTestInt(153480695L) + .setTestFloat(153468069500L) .setTestOffset(1649135171000000L) 
.setTestTimezone(1649174771000000L) .setTestSaformat(1534680660000000L) @@ -797,7 +803,7 @@ public void testTimestamp() throws Exception { JSONObject json = new JSONObject(); json.put("test_string", "1970-01-01 00:00:00.000010"); json.put("test_string_T_Z", "2022-03-28T18:47:59.01Z"); - json.put("test_long", 0L); + json.put("test_long", 1687984085000000L); json.put("test_int", 153480695); json.put("test_float", "1.534680695e11"); json.put("test_offset", "2022-04-05T09:06:11+04:00"); @@ -809,6 +815,69 @@ public void testTimestamp() throws Exception { assertEquals(expectedProto, protoMsg); } + @Test + public void testTimestampRepeated() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_string_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_string_T_Z_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_long_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_int_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_float_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_timezone_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_saformat_repeated") + .build()) + .build(); + TestRepeatedTimestamp expectedProto = + TestRepeatedTimestamp.newBuilder() + .addTestStringRepeated(10L) + .addTestStringTZRepeated(1648493279010000L) + .addTestLongRepeated(1687984085000000L) + .addTestIntRepeated(153480695L) + .addTestFloatRepeated(153468069500L) + .addTestOffsetRepeated(1649135171000000L) + .addTestTimezoneRepeated(1649174771000000L) + .addTestSaformatRepeated(1534680660000000L) + .build(); + JSONObject json = new JSONObject(); + json.put("test_string_repeated", new JSONArray(new String[] {"1970-01-01 00:00:00.000010"})); + json.put("test_string_T_Z_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01Z"})); + json.put("test_long_repeated", new JSONArray(new Long[] {1687984085000000L})); + json.put("test_int_repeated", new JSONArray(new Integer[] {153480695})); + json.put("test_float_repeated", new JSONArray(new String[] {"1.534680695e11"})); + json.put("test_offset_repeated", new JSONArray(new String[] {"2022-04-05T09:06:11+04:00"})); + json.put("test_timezone_repeated", new JSONArray(new String[] {"2022-04-05 09:06:11 PST"})); + json.put("test_saformat_repeated", new JSONArray(new String[] {"2018/08/19 12:11"})); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRepeatedTimestamp.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + @Test public void testDate() throws Exception { TableSchema tableSchema = @@ -993,7 +1062,7 @@ public void testStructComplex() throws Exception { .setTestNumeric( BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23456"))) .setTestGeo("POINT(1,1)") - .setTestTimestamp(123456780000000L) + .setTestTimestamp(12345678L) .setTestTime(CivilTimeEncoder.encodePacked64TimeMicros(LocalTime.of(1, 0, 1))) .setTestTimeStr(89332507144L) .addTestNumericRepeated( diff --git 
a/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto b/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto index 03209d9c81..d70d214be2 100644 --- a/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto +++ b/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto @@ -156,6 +156,17 @@ message TestTimestamp { optional int64 test_saformat = 8; } +message TestRepeatedTimestamp { + repeated int64 test_string_repeated = 1; + repeated int64 test_string_t_z_repeated = 2; + repeated int64 test_long_repeated = 3; + repeated int64 test_int_repeated = 4; + repeated int64 test_float_repeated = 5; + repeated int64 test_offset_repeated = 6; + repeated int64 test_timezone_repeated = 7; + repeated int64 test_saformat_repeated = 8; +} + message TestDate { optional int32 test_string = 1; optional int32 test_long = 2; From afc550aeacb0e3f26440eeb70d2cebbf65922c07 Mon Sep 17 00:00:00 2001 From: Gaole Meng Date: Thu, 13 Jul 2023 11:26:53 -0700 Subject: [PATCH 10/15] feat: improve json to proto conversion by caching schema (#2179) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add public api to stream writer to set the maximum wait time * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * modify back the readme change from owl post processor * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: Reduce the timeout to 5 minutes for the requests wait time in queue. Since in write api server side we have total timeout of 2 minutes, it does not make sense to wait 15 minutes to determine whether we have met dead connection, let's reduce the timeout here * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: 1.disable refresh of stream writer when the table schema is explicitly provided 2. fix location string matching for multiplexing * feat: improve json stream writer json to proto conversion speed by caching the schema. 
This will introduce approximately 2x improvement to append speed * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../clirr-ignored-differences.xml | 12 + .../cloud/bigquery/storage/v1/Exceptions.java | 23 ++ .../storage/v1/JsonToProtoMessage.java | 289 +++++++++++++----- .../storage/v1/SchemaAwareStreamWriter.java | 51 ++-- .../bigquery/storage/v1/ToProtoConverter.java | 5 +- .../storage/v1/JsonToProtoMessageTest.java | 120 ++++++-- 6 files changed, 367 insertions(+), 133 deletions(-) diff --git a/google-cloud-bigquerystorage/clirr-ignored-differences.xml b/google-cloud-bigquerystorage/clirr-ignored-differences.xml index 96d4b3d595..1ce4f651e5 100644 --- a/google-cloud-bigquerystorage/clirr-ignored-differences.xml +++ b/google-cloud-bigquerystorage/clirr-ignored-differences.xml @@ -157,5 +157,17 @@ com/google/cloud/bigquery/storage/v1/JsonStreamWriter boolean isDone() + + 7006 + com/google/cloud/bigquery/storage/v1/ToProtoConverter + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Object, boolean) + java.util.List + + + 7005 + com/google/cloud/bigquery/storage/v1/ToProtoConverter + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Object, boolean) + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Iterable, boolean) + diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java index fc4d4d8268..8824e43c77 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java @@ -259,6 +259,29 @@ public AppendSerializationError( } } + /** This exception is thrown from proto converter to wrap the row index to error mapping. */ + static class RowIndexToErrorException extends IllegalArgumentException { + Map rowIndexToErrorMessage; + + boolean hasDataUnknownError; + + public RowIndexToErrorException( + Map rowIndexToErrorMessage, boolean hasDataUnknownError) { + this.rowIndexToErrorMessage = rowIndexToErrorMessage; + this.hasDataUnknownError = hasDataUnknownError; + } + + // This message should not be exposed to the user directly. + // Please examine individual row's error through `rowIndexToErrorMessage`. + public String getMessage() { + return "The map of row index to error message is " + rowIndexToErrorMessage.toString(); + } + + public boolean hasDataUnknownError() { + return hasDataUnknownError; + } + } + /** This exception is used internally to handle field level parsing errors. 
*/ public static class FieldParseError extends IllegalArgumentException { private final String fieldName; diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java index 52faf55742..6cde31081a 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java @@ -16,6 +16,7 @@ package com.google.cloud.bigquery.storage.v1; import com.google.api.pathtemplate.ValidationException; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.primitives.Doubles; @@ -29,7 +30,10 @@ import java.math.BigDecimal; import java.math.RoundingMode; import java.time.LocalDate; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; @@ -140,7 +144,10 @@ public static DynamicMessage convertJsonToProtoMessage( } /** - * Converts input message to Protobuf + * Converts input message to Protobuf. + * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. * * @param protoSchema the schema of the output Protobuf schems. * @param tableSchema tha underlying table schema for which Protobuf is being built. @@ -149,15 +156,37 @@ public static DynamicMessage convertJsonToProtoMessage( * schema should be accepted. * @return Converted message in Protobuf format. */ - @Override public DynamicMessage convertToProtoMessage( Descriptor protoSchema, TableSchema tableSchema, Object json, boolean ignoreUnknownFields) { return convertToProtoMessage(protoSchema, tableSchema, (JSONObject) json, ignoreUnknownFields); } + /** + * Converts Json array to list of Protobuf + * + * @param protoSchema the schema of the output Protobuf schems. + * @param tableSchema tha underlying table schema for which Protobuf is being built. + * @param jsonArray the input JSON array converted to Protobuf. + * @param ignoreUnknownFields flag indicating that the additional fields not present in the output + * schema should be accepted. + * @return Converted message in Protobuf format. + */ + @Override + public List convertToProtoMessage( + Descriptor protoSchema, + TableSchema tableSchema, + Iterable jsonArray, + boolean ignoreUnknownFields) { + return convertToProtoMessage( + protoSchema, tableSchema, (JSONArray) jsonArray, ignoreUnknownFields); + } + /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * * @param protoSchema * @param json * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. @@ -174,6 +203,9 @@ public DynamicMessage convertToProtoMessage(Descriptor protoSchema, JSONObject j /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * * @param protoSchema * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, * NUMERIC, BIGNUMERIC @@ -194,6 +226,9 @@ public DynamicMessage convertToProtoMessage( /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * * @param protoSchema * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, * NUMERIC, BIGNUMERIC @@ -208,11 +243,48 @@ public DynamicMessage convertToProtoMessage( Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); Preconditions.checkNotNull(tableSchema, "TableSchema is null."); Preconditions.checkState(json.length() != 0, "JSONObject is empty."); - return convertToProtoMessage( protoSchema, tableSchema.getFieldsList(), json, "root", ignoreUnknownFields); } + /** + * Converts Json array to list of protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, + * NUMERIC, BIGNUMERIC + * @param jsonArray + * @param ignoreUnknownFields allows unknown fields in JSON input to be ignored. + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + public List convertToProtoMessage( + Descriptor protoSchema, + TableSchema tableSchema, + JSONArray jsonArray, + boolean ignoreUnknownFields) + throws IllegalArgumentException { + Preconditions.checkNotNull(jsonArray, "jsonArray is null."); + Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); + Preconditions.checkNotNull(tableSchema, "tableSchema is null."); + Preconditions.checkState(jsonArray.length() != 0, "jsonArray is empty."); + + return convertToProtoMessage( + protoSchema, tableSchema.getFieldsList(), jsonArray, "root", ignoreUnknownFields); + } + + private DynamicMessage convertToProtoMessage( + Descriptor protoSchema, + List tableSchema, + JSONObject jsonObject, + String jsonScope, + boolean ignoreUnknownFields) { + JSONArray jsonArray = new JSONArray(); + jsonArray.put(jsonObject); + return convertToProtoMessage( + protoSchema, tableSchema, jsonArray, jsonScope, ignoreUnknownFields) + .get(0); + } + /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * @@ -221,84 +293,162 @@ public DynamicMessage convertToProtoMessage( * @param jsonScope Debugging purposes * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. */ - private DynamicMessage convertToProtoMessage( + private List convertToProtoMessage( Descriptor protoSchema, List tableSchema, - JSONObject json, + JSONArray jsonArray, String jsonScope, boolean ignoreUnknownFields) - throws IllegalArgumentException { - - DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema); - String[] jsonNames = JSONObject.getNames(json); - if (jsonNames == null) { - return protoMsg.build(); - } - for (String jsonName : jsonNames) { - // We want lowercase here to support case-insensitive data writes. - // The protobuf descriptor that is used is assumed to have all lowercased fields - String jsonFieldLocator = jsonName.toLowerCase(); + throws RowIndexToErrorException { + List messageList = new ArrayList<>(); + Map jsonNameToMetadata = new HashMap<>(); + Map rowIndexToErrorMessage = new HashMap<>(); - // If jsonName is not compatible with proto naming convention, we should look by its - // placeholder name. - if (!BigQuerySchemaUtil.isProtoCompatible(jsonFieldLocator)) { - jsonFieldLocator = BigQuerySchemaUtil.generatePlaceholderFieldName(jsonFieldLocator); - } - String currentScope = jsonScope + "." 
+ jsonName; - FieldDescriptor field = protoSchema.findFieldByName(jsonFieldLocator); - if (field == null && !ignoreUnknownFields) { - throw new Exceptions.DataHasUnknownFieldException(currentScope); - } else if (field == null) { - continue; - } - TableFieldSchema fieldSchema = null; - if (tableSchema != null) { - // protoSchema is generated from tableSchema so their field ordering should match. - fieldSchema = tableSchema.get(field.getIndex()); - if (!fieldSchema.getName().toLowerCase().equals(BigQuerySchemaUtil.getFieldName(field))) { - throw new ValidationException( - "Field at index " - + field.getIndex() - + " has mismatch names (" - + fieldSchema.getName() - + ") (" - + field.getName() - + ")"); - } - } + boolean hasDataUnknownError = false; + for (int i = 0; i < jsonArray.length(); i++) { try { - if (!field.isRepeated()) { - fillField( - protoMsg, field, fieldSchema, json, jsonName, currentScope, ignoreUnknownFields); + DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema); + JSONObject jsonObject = jsonArray.getJSONObject(i); + String[] jsonNames = JSONObject.getNames(jsonObject); + if (jsonNames == null) { + messageList.add(protoMsg.build()); + continue; + } + for (String jsonName : jsonNames) { + String currentScope = jsonScope + "." + jsonName; + FieldDescriptorAndFieldTableSchema fieldDescriptorAndFieldTableSchema = + jsonNameToMetadata.computeIfAbsent( + currentScope, + k -> { + return computeDescriptorAndSchema( + currentScope, ignoreUnknownFields, jsonName, protoSchema, tableSchema); + }); + if (fieldDescriptorAndFieldTableSchema == null) { + continue; + } + FieldDescriptor field = fieldDescriptorAndFieldTableSchema.fieldDescriptor; + TableFieldSchema tableFieldSchema = fieldDescriptorAndFieldTableSchema.tableFieldSchema; + try { + if (!field.isRepeated()) { + fillField( + protoMsg, + field, + tableFieldSchema, + jsonObject, + jsonName, + currentScope, + ignoreUnknownFields); + } else { + fillRepeatedField( + protoMsg, + field, + tableFieldSchema, + jsonObject, + jsonName, + currentScope, + ignoreUnknownFields); + } + } catch (Exceptions.FieldParseError ex) { + throw ex; + } catch (Exception ex) { + // This function is recursively called, so this throw will be caught and throw directly + // out by the catch above. + throw new Exceptions.FieldParseError( + currentScope, + tableFieldSchema != null + ? tableFieldSchema.getType().name() + : field.getType().name(), + ex); + } + } + DynamicMessage msg; + try { + msg = protoMsg.build(); + } catch (UninitializedMessageException e) { + String errorMsg = e.getMessage(); + int idxOfColon = errorMsg.indexOf(":"); + String missingFieldName = errorMsg.substring(idxOfColon + 2); + throw new IllegalArgumentException( + String.format( + "JSONObject does not have the required field %s.%s.", + jsonScope, missingFieldName)); + } + messageList.add(msg); + } catch (IllegalArgumentException exception) { + if (exception instanceof Exceptions.DataHasUnknownFieldException) { + hasDataUnknownError = true; + } + if (exception instanceof Exceptions.FieldParseError) { + Exceptions.FieldParseError ex = (Exceptions.FieldParseError) exception; + rowIndexToErrorMessage.put( + i, + "Field " + + ex.getFieldName() + + " failed to convert to " + + ex.getBqType() + + ". 
Error: " + + ex.getCause().getMessage()); } else { - fillRepeatedField( - protoMsg, field, fieldSchema, json, jsonName, currentScope, ignoreUnknownFields); + rowIndexToErrorMessage.put(i, exception.getMessage()); } - } catch (Exceptions.FieldParseError ex) { - throw ex; - } catch (Exception ex) { - // This function is recursively called, so this throw will be caught and throw directly out - // by the catch - // above. - throw new Exceptions.FieldParseError( - currentScope, - fieldSchema != null ? fieldSchema.getType().name() : field.getType().name(), - ex); } } + if (!rowIndexToErrorMessage.isEmpty()) { + throw new RowIndexToErrorException(rowIndexToErrorMessage, hasDataUnknownError); + } + return messageList; + } - DynamicMessage msg; - try { - msg = protoMsg.build(); - } catch (UninitializedMessageException e) { - String errorMsg = e.getMessage(); - int idxOfColon = errorMsg.indexOf(":"); - String missingFieldName = errorMsg.substring(idxOfColon + 2); - throw new IllegalArgumentException( - String.format( - "JSONObject does not have the required field %s.%s.", jsonScope, missingFieldName)); + private static final class FieldDescriptorAndFieldTableSchema { + TableFieldSchema tableFieldSchema; + + // Field descriptor + FieldDescriptor fieldDescriptor; + } + + private FieldDescriptorAndFieldTableSchema computeDescriptorAndSchema( + String currentScope, + boolean ignoreUnknownFields, + String jsonName, + Descriptor protoSchema, + List tableFieldSchemaList) { + + // We want lowercase here to support case-insensitive data writes. + // The protobuf descriptor that is used is assumed to have all lowercased fields + String jsonFieldLocator = jsonName.toLowerCase(); + + // If jsonName is not compatible with proto naming convention, we should look by its + // placeholder name. + if (!BigQuerySchemaUtil.isProtoCompatible(jsonFieldLocator)) { + jsonFieldLocator = BigQuerySchemaUtil.generatePlaceholderFieldName(jsonFieldLocator); } - return msg; + + FieldDescriptor field = protoSchema.findFieldByName(jsonFieldLocator); + if (field == null && !ignoreUnknownFields) { + throw new Exceptions.DataHasUnknownFieldException(currentScope); + } else if (field == null) { + return null; + } + TableFieldSchema fieldSchema = null; + if (tableFieldSchemaList != null) { + // protoSchema is generated from tableSchema so their field ordering should match. 
+ fieldSchema = tableFieldSchemaList.get(field.getIndex()); + if (!fieldSchema.getName().toLowerCase().equals(BigQuerySchemaUtil.getFieldName(field))) { + throw new ValidationException( + "Field at index " + + field.getIndex() + + " has mismatch names (" + + fieldSchema.getName() + + ") (" + + field.getName() + + ")"); + } + } + FieldDescriptorAndFieldTableSchema fieldDescriptorAndFieldTableSchema = + new FieldDescriptorAndFieldTableSchema(); + fieldDescriptorAndFieldTableSchema.fieldDescriptor = field; + fieldDescriptorAndFieldTableSchema.tableFieldSchema = fieldSchema; + return fieldDescriptorAndFieldTableSchema; } /** @@ -321,7 +471,6 @@ private void fillField( String currentScope, boolean ignoreUnknownFields) throws IllegalArgumentException { - java.lang.Object val = json.get(exactJsonKeyName); if (val == JSONObject.NULL) { return; diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java index cbe5d63478..32f8e0f7e3 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java @@ -21,13 +21,15 @@ import com.google.api.gax.core.ExecutorProvider; import com.google.api.gax.rpc.TransportChannelProvider; import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; import com.google.common.base.Preconditions; import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.Descriptors.DescriptorValidationException; -import com.google.protobuf.Message; +import com.google.protobuf.DynamicMessage; import com.google.rpc.Code; import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.logging.Logger; import java.util.regex.Matcher; @@ -124,19 +126,23 @@ private void refreshWriter(TableSchema updatedSchema) this.streamWriter = streamWriterBuilder.setWriterSchema(this.protoSchema).build(); } - private Message buildMessage(T item) + private List buildMessage(Iterable items) throws InterruptedException, DescriptorValidationException, IOException { try { return this.toProtoConverter.convertToProtoMessage( - this.descriptor, this.tableSchema, item, ignoreUnknownFields); - } catch (Exceptions.DataHasUnknownFieldException ex) { + this.descriptor, this.tableSchema, items, ignoreUnknownFields); + } catch (RowIndexToErrorException ex) { + // We only retry for data unknown error. + if (!ex.hasDataUnknownError) { + throw ex; + } // Directly return error when stream writer refresh is disabled. 
if (this.skipRefreshStreamWriter) { throw ex; } LOG.warning( - "Saw unknown field " - + ex.getFieldName() + "Saw unknown field error during proto message conversin within error messages" + + ex.rowIndexToErrorMessage + ", try to refresh the writer with updated schema, stream: " + streamName); GetWriteStreamRequest writeStreamRequest = @@ -147,7 +153,7 @@ private Message buildMessage(T item) WriteStream writeStream = client.getWriteStream(writeStreamRequest); refreshWriter(writeStream.getTableSchema()); return this.toProtoConverter.convertToProtoMessage( - this.descriptor, this.tableSchema, item, ignoreUnknownFields); + this.descriptor, this.tableSchema, items, ignoreUnknownFields); } } /** @@ -169,7 +175,6 @@ public ApiFuture append(Iterable items, long offset) if (!this.skipRefreshStreamWriter && this.streamWriter.getUpdatedSchema() != null) { refreshWriter(this.streamWriter.getUpdatedSchema()); } - ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); // Any error in convertToProtoMessage will throw an // IllegalArgumentException/IllegalStateException/NullPointerException. @@ -177,29 +182,15 @@ public ApiFuture append(Iterable items, long offset) // After the conversion is finished an AppendSerializtionError exception that contains all the // conversion errors will be thrown. Map rowIndexToErrorMessage = new HashMap<>(); - int i = -1; - for (T item : items) { - i += 1; - try { - Message protoMessage = buildMessage(item); - rowsBuilder.addSerializedRows(protoMessage.toByteString()); - } catch (IllegalArgumentException exception) { - if (exception instanceof Exceptions.FieldParseError) { - Exceptions.FieldParseError ex = (Exceptions.FieldParseError) exception; - rowIndexToErrorMessage.put( - i, - "Field " - + ex.getFieldName() - + " failed to convert to " - + ex.getBqType() - + ". 
Error: " - + ex.getCause().getMessage()); - } else { - rowIndexToErrorMessage.put(i, exception.getMessage()); - } - } catch (InterruptedException ex) { - throw new RuntimeException(ex); + try { + List protoMessages = buildMessage(items); + for (DynamicMessage dynamicMessage : protoMessages) { + rowsBuilder.addSerializedRows(dynamicMessage.toByteString()); } + } catch (RowIndexToErrorException exception) { + rowIndexToErrorMessage = exception.rowIndexToErrorMessage; + } catch (InterruptedException ex) { + throw new RuntimeException(ex); } if (!rowIndexToErrorMessage.isEmpty()) { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java index ca17ed11e7..76ef223e24 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java @@ -17,11 +17,12 @@ import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; +import java.util.List; public interface ToProtoConverter { - DynamicMessage convertToProtoMessage( + List convertToProtoMessage( Descriptors.Descriptor protoSchema, TableSchema tableSchema, - T inputObject, + Iterable inputObject, boolean ignoreUnknownFields); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java index d6da83275d..dd3a6dcfa1 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java @@ -20,6 +20,7 @@ import com.google.cloud.bigquery.storage.test.JsonTest.*; import com.google.cloud.bigquery.storage.test.SchemaTest.*; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.protobuf.ByteString; @@ -29,6 +30,7 @@ import java.math.BigDecimal; import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.logging.Logger; import org.json.JSONArray; @@ -604,7 +606,7 @@ public void testInt32NotMatchInt64() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt32.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int32 field at root.int.", e.getMessage()); + assertTrue(e.getMessage().contains("JSONObject does not have a int32 field at root.int.")); } } @@ -625,7 +627,8 @@ public void testDateTimeMismatch() throws Exception { TestDatetime.getDescriptor(), tableSchema, json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int64 field at root.datetime.", e.getMessage()); + assertTrue( + e.getMessage().contains("JSONObject does not have a int64 field at root.datetime.")); } } @@ -674,7 +677,8 @@ public void testTimeMismatch() throws Exception { TestTime.getDescriptor(), tableSchema, json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int64 field at root.time[0].", 
e.getMessage()); + assertTrue( + e.getMessage().contains("JSONObject does not have a int64 field at root.time[0].")); } } @@ -908,9 +912,12 @@ public void testAllTypes() throws Exception { assertEquals(protoMsg, AllTypesToCorrectProto.get(entry.getKey())[success]); success += 1; } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a " + entry.getValue() + " field at root.test_field_type.", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a " + + entry.getValue() + + " field at root.test_field_type.")); } } if (entry.getKey() == DoubleType.getDescriptor()) { @@ -943,12 +950,12 @@ public void testAllRepeatedTypesWithLimits() throws Exception { LOG.info(e.getMessage()); assertTrue( e.getMessage() - .equals( + .contains( "JSONObject does not have a " + entry.getValue() + " field at root.test_repeated[0].") || e.getMessage() - .equals("Error: root.test_repeated[0] could not be converted to byte[].")); + .contains("Error: root.test_repeated[0] could not be converted to byte[].")); } } if (entry.getKey() == RepeatedDouble.getDescriptor()) { @@ -994,8 +1001,9 @@ public void testRequired() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestRequired.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have the required field root.required_double.", e.getMessage()); + assertTrue( + e.getMessage() + .contains("JSONObject does not have the required field root.required_double.")); } } @@ -1026,9 +1034,10 @@ public void testStructSimpleFail() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(MessageType.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a string field at root.test_field_type.test_field_type.", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at root.test_field_type.test_field_type.")); } } @@ -1196,8 +1205,9 @@ public void testStructComplexFail() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexRoot.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a int64 field at root.complex_lvl1.test_int.", e.getMessage()); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a int64 field at root.complex_lvl1.test_int.")); } } @@ -1210,8 +1220,9 @@ public void testRepeatedWithMixedTypes() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedDouble.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a double field at root.test_repeated[2].", e.getMessage()); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a double field at root.test_repeated[2].")); } } @@ -1272,9 +1283,10 @@ public void testNestedRepeatedComplexFail() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(NestedRepeated.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a string field at root.repeated_string.test_repeated[0].", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at root.repeated_string.test_repeated[0].")); } } @@ -1305,10 +1317,10 @@ public void testAllowUnknownFieldsError() throws 
Exception { DynamicMessage protoMsg = JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt64.getDescriptor(), json); Assert.fail("Should fail"); - } catch (Exceptions.DataHasUnknownFieldException e) { - assertEquals( - "The source object has fields unknown to BigQuery: root.string.", e.getMessage()); - assertEquals("root.string", e.getFieldName()); + } catch (IllegalArgumentException e) { + assertTrue( + e.getMessage() + .contains("The source object has fields unknown to BigQuery: " + "root.string.")); } } @@ -1369,9 +1381,10 @@ public void testAllowUnknownFieldsSecondLevel() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexLvl1.getDescriptor(), json); Assert.fail("Should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "The source object has fields unknown to BigQuery: root.complex_lvl2.no_match.", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "The source object has fields unknown to BigQuery: root.complex_lvl2.no_match.")); } } @@ -1434,9 +1447,9 @@ public void testBadJsonFieldRepeated() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage( RepeatedBytes.getDescriptor(), ts, json); Assert.fail("Should fail"); - } catch (Exceptions.FieldParseError ex) { - assertEquals(ex.getBqType(), "NUMERIC"); - assertEquals(ex.getFieldName(), "root.test_repeated"); + } catch (RowIndexToErrorException ex) { + assertTrue(ex.rowIndexToErrorMessage.size() == 1); + assertTrue(ex.getMessage().contains("root.test_repeated failed to convert to NUMERIC.")); } } @@ -1461,7 +1474,7 @@ public void testBadJsonFieldIntRepeated() throws Exception { RepeatedInt32.getDescriptor(), ts, json); Assert.fail("Should fail"); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Text 'blah' could not be parsed at index 0"); + assertTrue(ex.getMessage().contains("Text 'blah' could not be parsed at index 0")); } } @@ -1528,6 +1541,51 @@ public void testDoubleAndFloatToNumericConversion() { assertEquals(expectedProto, protoMsg); } + @Test + public void testDoubleAndFloatToNumericConversionWithJsonArray() { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("numeric") + .setType(TableFieldSchema.Type.NUMERIC) + .build()) + .build(); + List protoList = new ArrayList<>(); + int protoNum = 10; + for (int i = 0; i < protoNum; i++) { + protoList.add( + TestNumeric.newBuilder() + .setNumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("24.678" + i))) + .build()); + } + + JSONArray doubleJsonArray = new JSONArray(); + JSONArray floatJsonArray = new JSONArray(); + for (int i = 0; i < protoNum; i++) { + JSONObject doubleJson = new JSONObject(); + doubleJson.put("numeric", new Double(24.678 + (i * 0.0001))); + doubleJsonArray.put(doubleJson); + + JSONObject floatJson = new JSONObject(); + floatJson.put("numeric", new Float(24.678 + (i * 0.0001))); + floatJsonArray.put(floatJson); + } + + List protoMsgList = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), ts, doubleJsonArray, false); + assertEquals(protoList, protoMsgList); + + protoMsgList = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), ts, floatJsonArray, false); + assertEquals(protoList, protoMsgList); + } + @Test public void testBigDecimalToBigNumericConversion() { TableSchema ts = From d9b526a2e4109ef5ed95fb74373f2f13b06c7c54 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" 
<78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 12:49:51 -0400 Subject: [PATCH 11/15] feat: add ResourceExhausted to retryable error for Write API unary calls (#2178) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add ResourceExhausted to retryable error for Write API unary calls docs: add multiplexing documentation PiperOrigin-RevId: 545839491 Source-Link: https://github.com/googleapis/googleapis/commit/2b006afc7a392006602ce0868c22341b5aeef4a8 Source-Link: https://github.com/googleapis/googleapis-gen/commit/0d52d385bd4e78c7b2c83755013fe103e804c384 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGQ1MmQzODViZDRlNzhjN2IyYzgzNzU1MDEzZmUxMDNlODA0YzM4NCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../v1/stub/BigQueryWriteStubSettings.java | 4 +- .../storage/v1/MockBigQueryWriteImpl.java | 3 +- .../v1beta1/MockBigQueryStorageImpl.java | 3 +- .../v1beta2/MockBigQueryWriteImpl.java | 3 +- .../storage/v1/AppendRowsRequest.java | 382 ++++++++++++++---- .../v1/AppendRowsRequestOrBuilder.java | 56 ++- .../bigquery/storage/v1/ReadSession.java | 32 +- .../storage/v1/ReadSessionOrBuilder.java | 8 +- .../cloud/bigquery/storage/v1/storage.proto | 53 ++- .../cloud/bigquery/storage/v1/stream.proto | 8 +- .../bigquery/storage/v1beta1/ReadOptions.java | 23 -- .../bigquery/storage/v1beta1/Storage.java | 10 - .../bigquery/storage/v1beta2/StreamProto.java | 4 +- 13 files changed, 418 insertions(+), 171 deletions(-) diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java index 6d0c86e18a..9331a9dc48 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java @@ -259,7 +259,9 @@ public static class Builder extends StubSettings.BuildernewArrayList( - StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.RESOURCE_EXHAUSTED))); RETRYABLE_CODE_DEFINITIONS = definitions.build(); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java index f10e9f3a9d..cadc196f5c 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java @@ -174,8 +174,7 @@ public void batchCommitWriteStreams( responseObserver.onError( new IllegalArgumentException( String.format( - "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or" - + " %s", + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s", response == null ? 
"null" : response.getClass().getName(), BatchCommitWriteStreamsResponse.class.getName(), Exception.class.getName()))); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java index 889be3bbb8..b1d6e58aae 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java @@ -117,8 +117,7 @@ public void batchCreateReadSessionStreams( responseObserver.onError( new IllegalArgumentException( String.format( - "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected" - + " %s or %s", + "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected %s or %s", response == null ? "null" : response.getClass().getName(), Storage.BatchCreateReadSessionStreamsResponse.class.getName(), Exception.class.getName()))); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java index 814d5b73ff..098a1e7fa4 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java @@ -174,8 +174,7 @@ public void batchCommitWriteStreams( responseObserver.onError( new IllegalArgumentException( String.format( - "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or" - + " %s", + "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s", response == null ? "null" : response.getClass().getName(), BatchCommitWriteStreamsResponse.class.getName(), Exception.class.getName()))); diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java index 55258107b7..bf64f57698 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java @@ -24,9 +24,10 @@ *

  * Request message for `AppendRows`.
  *
- * Due to the nature of AppendRows being a bidirectional streaming RPC, certain
- * parts of the AppendRowsRequest need only be specified for the first request
- * sent each time the gRPC network connection is opened/reopened.
+ * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+ * AppendRowsRequest need only be specified for the first request before
+ * switching table destinations. You can also switch table destinations within
+ * the same connection for the default stream.
  *
  * The size of a single AppendRowsRequest must be less than 10 MB in size.
  * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
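An illustrative sketch of the contract described above (assumptions not taken from this change: a default stream on two hypothetical tables t1 and t2, a prepared `descriptorProto` for the row schema, and pre-serialized row bytes). The first request on a connection names the destination and carries the schema, a follow-up to the same destination may omit both, and a switch to another table re-specifies both:

    import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;
    import com.google.cloud.bigquery.storage.v1.ProtoRows;
    import com.google.cloud.bigquery.storage.v1.ProtoSchema;
    import com.google.protobuf.ByteString;
    import com.google.protobuf.DescriptorProtos.DescriptorProto;

    final class AppendRowsRequestSketch {

      // r1: first request on the connection -- destination and writer schema are required.
      static AppendRowsRequest first(DescriptorProto descriptorProto, ByteString row) {
        return AppendRowsRequest.newBuilder()
            .setWriteStream("projects/p/datasets/d/tables/t1/streams/_default")
            .setProtoRows(
                AppendRowsRequest.ProtoData.newBuilder()
                    .setWriterSchema(ProtoSchema.newBuilder().setProtoDescriptor(descriptorProto))
                    .setRows(ProtoRows.newBuilder().addSerializedRows(row)))
            .build();
      }

      // r2, r3: same destination and schema, so write_stream and writer_schema may be omitted.
      static AppendRowsRequest followUp(ByteString row) {
        return AppendRowsRequest.newBuilder()
            .setProtoRows(
                AppendRowsRequest.ProtoData.newBuilder()
                    .setRows(ProtoRows.newBuilder().addSerializedRows(row)))
            .build();
      }

      // r4, r5: the destination changes, so both fields are re-specified and kept in every
      // later request on this connection.
      static AppendRowsRequest switchTable(DescriptorProto descriptorProto, ByteString row) {
        return AppendRowsRequest.newBuilder()
            .setWriteStream("projects/p/datasets/d/tables/t2/streams/_default")
            .setProtoRows(
                AppendRowsRequest.ProtoData.newBuilder()
                    .setWriterSchema(ProtoSchema.newBuilder().setProtoDescriptor(descriptorProto))
                    .setRows(ProtoRows.newBuilder().addSerializedRows(row)))
            .build();
      }

      private AppendRowsRequestSketch() {}
    }

In normal use the library's StreamWriter/JsonStreamWriter wrappers fill these fields in for the caller; the sketch only spells out the wire-level rule this comment describes.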
@@ -85,10 +86,9 @@ protected com.google.protobuf.MapField internalGetMapField(int number) {
    *
    *
    * 
-   * An enum to indicate how to interpret missing values. Missing values are
-   * fields present in user schema but missing in rows. A missing value can
-   * represent a NULL or a column default value defined in BigQuery table
-   * schema.
+   * An enum to indicate how to interpret missing values of fields that are
+   * present in user schema but missing in rows. A missing value can represent a
+   * NULL or a column default value defined in BigQuery table schema.
    * 
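An illustrative sketch of selecting this interpretation per column (assumptions not taken from this change: the request-level missing_value_interpretations map, keyed by user-schema field name, and hypothetical columns update_time and note):

    import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;
    import com.google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation;

    final class MissingValueSketch {
      static AppendRowsRequest.Builder applyRules(AppendRowsRequest.Builder request) {
        return request
            // A row that omits "update_time" falls back to the column's default value.
            .putMissingValueInterpretations("update_time", MissingValueInterpretation.DEFAULT_VALUE)
            // A row that omits "note" is written as NULL.
            .putMissingValueInterpretations("note", MissingValueInterpretation.NULL_VALUE);
      }

      private MissingValueSketch() {}
    }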
* * Protobuf enum {@code @@ -260,9 +260,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -274,9 +279,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -288,9 +298,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -390,9 +405,14 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -407,9 +427,14 @@ public boolean hasWriterSchema() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -426,9 +451,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -915,9 +945,14 @@ public Builder mergeFrom( * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -931,9 +966,14 @@ public boolean hasWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -953,9 +993,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -977,9 +1022,14 @@ public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -999,9 +1049,14 @@ public Builder setWriterSchema( * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1027,9 +1082,14 @@ public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchem * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1048,9 +1108,14 @@ public Builder clearWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1064,9 +1129,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder getWriterSchemaB * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1084,9 +1154,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchema * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1440,10 +1515,14 @@ public RowsCase getRowsCase() { * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -1452,6 +1531,22 @@ public RowsCase getRowsCase() {
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * @@ -1476,10 +1571,14 @@ public java.lang.String getWriteStream() { * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -1488,6 +1587,22 @@ public java.lang.String getWriteStream() {
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * @@ -2248,9 +2363,10 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * Request message for `AppendRows`.
    *
-   * Due to the nature of AppendRows being a bidirectional streaming RPC, certain
-   * parts of the AppendRowsRequest need only be specified for the first request
-   * sent each time the gRPC network connection is opened/reopened.
+   * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+   * AppendRowsRequest need only be specified for the first request before
+   * switching table destinations. You can also switch table destinations within
+   * the same connection for the default stream.
    *
    * The size of a single AppendRowsRequest must be less than 10 MB in size.
    * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
@@ -2563,10 +2679,14 @@ public Builder clearRows() {
      *
      *
      * 
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2575,6 +2695,22 @@ public Builder clearRows() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2598,10 +2734,14 @@ public java.lang.String getWriteStream() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2610,6 +2750,22 @@ public java.lang.String getWriteStream() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2633,10 +2789,14 @@ public com.google.protobuf.ByteString getWriteStreamBytes() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2645,6 +2805,22 @@ public com.google.protobuf.ByteString getWriteStreamBytes() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2667,10 +2843,14 @@ public Builder setWriteStream(java.lang.String value) { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2679,6 +2859,22 @@ public Builder setWriteStream(java.lang.String value) {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2697,10 +2893,14 @@ public Builder clearWriteStream() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2709,6 +2909,22 @@ public Builder clearWriteStream() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java index a9d1f10fe1..1f40b2ec71 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java @@ -27,10 +27,14 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -39,6 +43,22 @@ public interface AppendRowsRequestOrBuilder
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * @@ -52,10 +72,14 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -64,6 +88,22 @@ public interface AppendRowsRequestOrBuilder
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java index 54711847d8..28eb0fd9f0 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -4342,10 +4342,10 @@ public long getEstimatedTotalBytesScanned() { * * *
-   * Output only. A pre-projected estimate of the total physical size (in bytes)
-   * of files this session will scan when all streams are completely consumed.
-   * This estimate does not depend on the selected columns and can be based on
-   * metadata from the table which might be incomplete or stale. Only set for
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table.  This field is only set for
    * BigLake tables.
    * 
* @@ -7182,10 +7182,10 @@ public Builder clearEstimatedTotalBytesScanned() { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
      * BigLake tables.
      * 
* @@ -7203,10 +7203,10 @@ public long getEstimatedTotalPhysicalFileSize() { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
      * BigLake tables.
      * 
* @@ -7228,10 +7228,10 @@ public Builder setEstimatedTotalPhysicalFileSize(long value) { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
      * BigLake tables.
      * 
* diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java index 85a8b45aa1..1fd051672d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -435,10 +435,10 @@ public interface ReadSessionOrBuilder * * *
-   * Output only. A pre-projected estimate of the total physical size (in bytes)
-   * of files this session will scan when all streams are completely consumed.
-   * This estimate does not depend on the selected columns and can be based on
-   * metadata from the table which might be incomplete or stale. Only set for
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table.  This field is only set for
    * BigLake tables.
    * 
* diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto index d28c36f43f..2959faaf0b 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto @@ -397,9 +397,10 @@ message CreateWriteStreamRequest { // Request message for `AppendRows`. // -// Due to the nature of AppendRows being a bidirectional streaming RPC, certain -// parts of the AppendRowsRequest need only be specified for the first request -// sent each time the gRPC network connection is opened/reopened. +// Because AppendRows is a bidirectional streaming RPC, certain parts of the +// AppendRowsRequest need only be specified for the first request before +// switching table destinations. You can also switch table destinations within +// the same connection for the default stream. // // The size of a single AppendRowsRequest must be less than 10 MB in size. // Requests larger than this return an error, typically `INVALID_ARGUMENT`. @@ -407,9 +408,14 @@ message AppendRowsRequest { // ProtoData contains the data rows and schema when constructing append // requests. message ProtoData { - // Proto schema used to serialize the data. This value only needs to be - // provided as part of the first request on a gRPC network connection, - // and will be ignored for subsequent requests on the connection. + // The protocol buffer schema used to serialize the data. Provide this value + // whenever: + // + // * You send the first request of an RPC connection. + // + // * You change the input schema. + // + // * You specify a new destination table. ProtoSchema writer_schema = 1; // Serialized row data in protobuf message format. @@ -419,10 +425,9 @@ message AppendRowsRequest { ProtoRows rows = 2; } - // An enum to indicate how to interpret missing values. Missing values are - // fields present in user schema but missing in rows. A missing value can - // represent a NULL or a column default value defined in BigQuery table - // schema. + // An enum to indicate how to interpret missing values of fields that are + // present in user schema but missing in rows. A missing value can represent a + // NULL or a column default value defined in BigQuery table schema. enum MissingValueInterpretation { // Invalid missing value interpretation. Requests with this value will be // rejected. @@ -436,10 +441,14 @@ message AppendRowsRequest { DEFAULT_VALUE = 2; } - // Required. The write_stream identifies the target of the append operation, - // and only needs to be specified as part of the first request on the gRPC - // connection. If provided for subsequent requests, it must match the value of - // the first request. + // Required. The write_stream identifies the append operation. It must be + // provided in the following scenarios: + // + // * In the first request to an AppendRows connection. + // + // * In all subsequent requests to an AppendRows connection, if you use the + // same connection to write to multiple tables or change the input schema for + // default streams. // // For explicitly created write streams, the format is: // @@ -448,6 +457,22 @@ message AppendRowsRequest { // For the special default stream, the format is: // // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. 
+ // + // An example of a possible sequence of requests with write_stream fields + // within a single connection: + // + // * r1: {write_stream: stream_name_1} + // + // * r2: {write_stream: /*omit*/} + // + // * r3: {write_stream: /*omit*/} + // + // * r4: {write_stream: stream_name_2} + // + // * r5: {write_stream: stream_name_2} + // + // The destination changed in request_4, so the write_stream field must be + // populated in all subsequent requests in this stream. string write_stream = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto index 0a7c7c79c0..785c74f788 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto @@ -194,10 +194,10 @@ message ReadSession { int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. A pre-projected estimate of the total physical size (in bytes) - // of files this session will scan when all streams are completely consumed. - // This estimate does not depend on the selected columns and can be based on - // metadata from the table which might be incomplete or stale. Only set for + // Output only. A pre-projected estimate of the total physical size of files + // (in bytes) that this session will scan when all streams are consumed. This + // estimate is independent of the selected columns and can be based on + // incomplete or stale metadata from the table. This field is only set for // BigLake tables. int64 estimated_total_physical_file_size = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java index f0070660b2..9c058789f6 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java @@ -90,7 +90,6 @@ public interface TableReadOptionsOrBuilder * @return A list containing the selectedFields. */ java.util.List getSelectedFieldsList(); - /** * * @@ -149,7 +148,6 @@ public interface TableReadOptionsOrBuilder * @return The count of selectedFields. */ int getSelectedFieldsCount(); - /** * * @@ -209,7 +207,6 @@ public interface TableReadOptionsOrBuilder * @return The selectedFields at the given index. */ java.lang.String getSelectedFields(int index); - /** * * @@ -291,7 +288,6 @@ public interface TableReadOptionsOrBuilder * @return The rowRestriction. 
*/ java.lang.String getRowRestriction(); - /** * * @@ -424,7 +420,6 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { return selectedFields_; } - /** * * @@ -485,7 +480,6 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { public int getSelectedFieldsCount() { return selectedFields_.size(); } - /** * * @@ -547,7 +541,6 @@ public int getSelectedFieldsCount() { public java.lang.String getSelectedFields(int index) { return selectedFields_.get(index); } - /** * * @@ -614,7 +607,6 @@ public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { @SuppressWarnings("serial") private volatile java.lang.Object rowRestriction_ = ""; - /** * * @@ -647,7 +639,6 @@ public java.lang.String getRowRestriction() { return s; } } - /** * * @@ -1087,7 +1078,6 @@ private void ensureSelectedFieldsIsMutable() { } bitField0_ |= 0x00000001; } - /** * * @@ -1149,7 +1139,6 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { selectedFields_.makeImmutable(); return selectedFields_; } - /** * * @@ -1210,7 +1199,6 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { public int getSelectedFieldsCount() { return selectedFields_.size(); } - /** * * @@ -1272,7 +1260,6 @@ public int getSelectedFieldsCount() { public java.lang.String getSelectedFields(int index) { return selectedFields_.get(index); } - /** * * @@ -1334,7 +1321,6 @@ public java.lang.String getSelectedFields(int index) { public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { return selectedFields_.getByteString(index); } - /** * * @@ -1404,7 +1390,6 @@ public Builder setSelectedFields(int index, java.lang.String value) { onChanged(); return this; } - /** * * @@ -1473,7 +1458,6 @@ public Builder addSelectedFields(java.lang.String value) { onChanged(); return this; } - /** * * @@ -1539,7 +1523,6 @@ public Builder addAllSelectedFields(java.lang.Iterable values) onChanged(); return this; } - /** * * @@ -1604,7 +1587,6 @@ public Builder clearSelectedFields() { onChanged(); return this; } - /** * * @@ -1676,7 +1658,6 @@ public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { } private java.lang.Object rowRestriction_ = ""; - /** * * @@ -1708,7 +1689,6 @@ public java.lang.String getRowRestriction() { return (java.lang.String) ref; } } - /** * * @@ -1740,7 +1720,6 @@ public com.google.protobuf.ByteString getRowRestrictionBytes() { return (com.google.protobuf.ByteString) ref; } } - /** * * @@ -1771,7 +1750,6 @@ public Builder setRowRestriction(java.lang.String value) { onChanged(); return this; } - /** * * @@ -1798,7 +1776,6 @@ public Builder clearRowRestriction() { onChanged(); return this; } - /** * * diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java index a695486fc4..8713b6a98f 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java @@ -12454,7 +12454,6 @@ public Builder clearStatus() { : status_; } } - /** * * @@ -12487,7 +12486,6 @@ public Builder clearStatus() { com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder, 
com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder> throttleStatusBuilder_; - /** * * @@ -12503,7 +12501,6 @@ public Builder clearStatus() { public boolean hasThrottleStatus() { return ((bitField0_ & 0x00000010) != 0); } - /** * * @@ -12526,7 +12523,6 @@ public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus getThrot return throttleStatusBuilder_.getMessage(); } } - /** * * @@ -12551,7 +12547,6 @@ public Builder setThrottleStatus( onChanged(); return this; } - /** * * @@ -12574,7 +12569,6 @@ public Builder setThrottleStatus( onChanged(); return this; } - /** * * @@ -12604,7 +12598,6 @@ public Builder mergeThrottleStatus( onChanged(); return this; } - /** * * @@ -12625,7 +12618,6 @@ public Builder clearThrottleStatus() { onChanged(); return this; } - /** * * @@ -12642,7 +12634,6 @@ public Builder clearThrottleStatus() { onChanged(); return getThrottleStatusFieldBuilder().getBuilder(); } - /** * * @@ -12664,7 +12655,6 @@ public Builder clearThrottleStatus() { : throttleStatus_; } } - /** * * diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java index e9d0a925a8..1fd91308f5 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java @@ -56,8 +56,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { - "\n" - + "2google/cloud/bigquery/storage/v1beta2/stream.proto\022%google.cloud.bigquery.stor" + "\n2google/cloud/bigquery/storage/v1beta2/" + + "stream.proto\022%google.cloud.bigquery.stor" + "age.v1beta2\032\037google/api/field_behavior.p" + "roto\032\031google/api/resource.proto\0321google/" + "cloud/bigquery/storage/v1beta2/arrow.pro" From 7ce19e7a4ca47df9590c1023abcc459248b1fec2 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 17 Jul 2023 18:50:39 +0200 Subject: [PATCH 12/15] deps: update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 (#2180) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index b00f229522..9d52b2cfc6 100644 --- a/pom.xml +++ b/pom.xml @@ -76,7 +76,7 @@ com.google.cloud google-cloud-shared-dependencies - 3.12.0 + 3.13.0 pom import From 75ce0b5d7009bbb47b91c222390cfe864b8bd84e Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 17 Jul 2023 21:02:49 +0200 Subject: [PATCH 13/15] deps: update dependency org.apache.avro:avro to v1.11.2 (#2177) --- google-cloud-bigquerystorage/pom.xml | 2 +- samples/install-without-bom/pom.xml | 2 +- samples/snapshot/pom.xml | 2 +- samples/snippets/pom.xml | 2 +- tutorials/JsonWriterDefaultStream/pom.xml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index ccded10323..183a275300 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -179,7 +179,7 @@ org.apache.avro avro - 1.11.1 + 1.11.2 test diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 4fe8193bf3..bee51c1af9 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -42,7 +42,7 @@ org.apache.avro avro - 
1.11.1 + 1.11.2 org.apache.arrow diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index c036461d94..99a8f5b1d1 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -41,7 +41,7 @@ org.apache.avro avro - 1.11.1 + 1.11.2 diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index 936fd3901b..9d1c2ba2da 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -53,7 +53,7 @@ org.apache.avro avro - 1.11.1 + 1.11.2 org.apache.arrow diff --git a/tutorials/JsonWriterDefaultStream/pom.xml b/tutorials/JsonWriterDefaultStream/pom.xml index 429e39be02..75957b86ce 100644 --- a/tutorials/JsonWriterDefaultStream/pom.xml +++ b/tutorials/JsonWriterDefaultStream/pom.xml @@ -29,7 +29,7 @@ org.apache.avro avro - 1.11.1 + 1.11.2 org.apache.arrow From 87f93a921c62cd71808cddc35382bbaabb7da54b Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 17 Jul 2023 21:31:38 +0200 Subject: [PATCH 14/15] deps: update dependency com.google.cloud:google-cloud-bigquery to v2.30.0 (#2184) --- pom.xml | 2 +- samples/install-without-bom/pom.xml | 2 +- samples/snapshot/pom.xml | 2 +- samples/snippets/pom.xml | 2 +- tutorials/JsonWriterDefaultStream/pom.xml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pom.xml b/pom.xml index 9d52b2cfc6..cb3d4fdacf 100644 --- a/pom.xml +++ b/pom.xml @@ -132,7 +132,7 @@ com.google.cloud google-cloud-bigquery - 2.29.0 + 2.30.0 test diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index bee51c1af9..ba2c5cc3b7 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -37,7 +37,7 @@ com.google.cloud google-cloud-bigquery - 2.29.0 + 2.30.0 org.apache.avro diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 99a8f5b1d1..9c68c71740 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -36,7 +36,7 @@ com.google.cloud google-cloud-bigquery - 2.29.0 + 2.30.0 org.apache.avro diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index 9d1c2ba2da..4f94b1fd8b 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -48,7 +48,7 @@ com.google.cloud google-cloud-bigquery - 2.29.0 + 2.30.0 org.apache.avro diff --git a/tutorials/JsonWriterDefaultStream/pom.xml b/tutorials/JsonWriterDefaultStream/pom.xml index 75957b86ce..256f894bef 100644 --- a/tutorials/JsonWriterDefaultStream/pom.xml +++ b/tutorials/JsonWriterDefaultStream/pom.xml @@ -24,7 +24,7 @@ com.google.cloud google-cloud-bigquery - 2.29.0 + 2.30.0 org.apache.avro From d5428f7f3e941f5be76af296b204eb3cfad4f333 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 16:41:11 -0400 Subject: [PATCH 15/15] chore(main): release 2.40.0 (#2170) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- CHANGELOG.md | 24 +++++++++++++++++++ google-cloud-bigquerystorage-bom/pom.xml | 16 ++++++------- google-cloud-bigquerystorage/pom.xml | 4 ++-- grpc-google-cloud-bigquerystorage-v1/pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 4 ++-- pom.xml | 16 ++++++------- proto-google-cloud-bigquerystorage-v1/pom.xml | 4 ++-- .../pom.xml | 4 ++-- .../pom.xml | 4 ++-- samples/snapshot/pom.xml | 2 +- versions.txt | 14 +++++------ 12 files changed, 62 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6119b2db36..167f36f782 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 
[2.40.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.39.1...v2.40.0) (2023-07-17) + + +### Features + +* Add ResourceExhausted to retryable error for Write API unary calls ([#2178](https://github.com/googleapis/java-bigquerystorage/issues/2178)) ([d9b526a](https://github.com/googleapis/java-bigquerystorage/commit/d9b526a2e4109ef5ed95fb74373f2f13b06c7c54)) +* Improve json to proto conversion by caching schema ([#2179](https://github.com/googleapis/java-bigquerystorage/issues/2179)) ([afc550a](https://github.com/googleapis/java-bigquerystorage/commit/afc550aeacb0e3f26440eeb70d2cebbf65922c07)) + + +### Bug Fixes + +* Interpret Integer and Float values for TIMESTAMP as microseconds ([#2175](https://github.com/googleapis/java-bigquerystorage/issues/2175)) ([e5bb5d0](https://github.com/googleapis/java-bigquerystorage/commit/e5bb5d099ea0272c4bd447b7f8fef5207c14ffc5)) +* Support DATETIME field that has a space between date and time and has only date ([#2176](https://github.com/googleapis/java-bigquerystorage/issues/2176)) ([494ce85](https://github.com/googleapis/java-bigquerystorage/commit/494ce8513e8925b4330a2bf45641ba38db625c1d)) + + +### Dependencies + +* Update dependency com.google.auto.value:auto-value to v1.10.2 ([#2171](https://github.com/googleapis/java-bigquerystorage/issues/2171)) ([721908d](https://github.com/googleapis/java-bigquerystorage/commit/721908d412f1d82aff9aed8edcf727fc5b1bf950)) +* Update dependency com.google.auto.value:auto-value-annotations to v1.10.2 ([#2172](https://github.com/googleapis/java-bigquerystorage/issues/2172)) ([8a51fae](https://github.com/googleapis/java-bigquerystorage/commit/8a51fae180ced3b362acc350999157d3d6e0da6a)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.29.0 ([#2168](https://github.com/googleapis/java-bigquerystorage/issues/2168)) ([50ca432](https://github.com/googleapis/java-bigquerystorage/commit/50ca432854851f7cc89cb50a327d9641000b81ee)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.30.0 ([#2184](https://github.com/googleapis/java-bigquerystorage/issues/2184)) ([87f93a9](https://github.com/googleapis/java-bigquerystorage/commit/87f93a921c62cd71808cddc35382bbaabb7da54b)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 ([#2180](https://github.com/googleapis/java-bigquerystorage/issues/2180)) ([7ce19e7](https://github.com/googleapis/java-bigquerystorage/commit/7ce19e7a4ca47df9590c1023abcc459248b1fec2)) +* Update dependency org.apache.avro:avro to v1.11.2 
([#2177](https://github.com/googleapis/java-bigquerystorage/issues/2177)) ([75ce0b5](https://github.com/googleapis/java-bigquerystorage/commit/75ce0b5d7009bbb47b91c222390cfe864b8bd84e)) + ## [2.39.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.39.0...v2.39.1) (2023-06-22) diff --git a/google-cloud-bigquerystorage-bom/pom.xml b/google-cloud-bigquerystorage-bom/pom.xml index 4748c67fbb..c497f44625 100644 --- a/google-cloud-bigquerystorage-bom/pom.xml +++ b/google-cloud-bigquerystorage-bom/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage-bom - 2.39.2-SNAPSHOT + 2.40.0 pom com.google.cloud @@ -52,37 +52,37 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.2-SNAPSHOT + 2.40.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.2-SNAPSHOT + 2.40.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index 183a275300..38aea24aa6 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage - 2.39.2-SNAPSHOT + 2.40.0 jar BigQuery Storage https://github.com/googleapis/java-bigquerystorage @@ -11,7 +11,7 @@ com.google.cloud google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 google-cloud-bigquerystorage diff --git a/grpc-google-cloud-bigquerystorage-v1/pom.xml b/grpc-google-cloud-bigquerystorage-v1/pom.xml index 1b5d1352e7..dd440be295 100644 --- a/grpc-google-cloud-bigquerystorage-v1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.2-SNAPSHOT + 2.40.0 grpc-google-cloud-bigquerystorage-v1 GRPC library for grpc-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml index 259ccae9ec..4a6ab39d59 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.2-SNAPSHOT + 0.164.0 grpc-google-cloud-bigquerystorage-v1beta1 GRPC library for grpc-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml index 3efd7165e9..8cf1f37f05 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.2-SNAPSHOT + 0.164.0 grpc-google-cloud-bigquerystorage-v1beta2 GRPC library for grpc-google-cloud-bigquerystorage-v1beta2 com.google.cloud 
google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/pom.xml b/pom.xml index cb3d4fdacf..25b163d72d 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-bigquerystorage-parent pom - 2.39.2-SNAPSHOT + 2.40.0 BigQuery Storage Parent https://github.com/googleapis/java-bigquerystorage @@ -83,37 +83,37 @@ com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.2-SNAPSHOT + 2.40.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.2-SNAPSHOT + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.2-SNAPSHOT + 2.40.0 com.google.cloud google-cloud-bigquerystorage - 2.39.2-SNAPSHOT + 2.40.0 org.json diff --git a/proto-google-cloud-bigquerystorage-v1/pom.xml b/proto-google-cloud-bigquerystorage-v1/pom.xml index 2b6a4ae6c8..94f629692f 100644 --- a/proto-google-cloud-bigquerystorage-v1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.2-SNAPSHOT + 2.40.0 proto-google-cloud-bigquerystorage-v1 PROTO library for proto-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml index 52e7c44886..d08b09cdf3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.2-SNAPSHOT + 0.164.0 proto-google-cloud-bigquerystorage-v1beta1 PROTO library for proto-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml index f676e54044..128077340e 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.2-SNAPSHOT + 0.164.0 proto-google-cloud-bigquerystorage-v1beta2 PROTO library for proto-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 9c68c71740..455674f0aa 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -29,7 +29,7 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.2-SNAPSHOT + 2.40.0 diff --git a/versions.txt b/versions.txt index 7195cafbeb..b63f957686 100644 --- a/versions.txt +++ b/versions.txt @@ -1,10 +1,10 @@ # Format: # module:released-version:current-version -google-cloud-bigquerystorage:2.39.1:2.39.2-SNAPSHOT -grpc-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.2-SNAPSHOT -grpc-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.2-SNAPSHOT -grpc-google-cloud-bigquerystorage-v1:2.39.1:2.39.2-SNAPSHOT -proto-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.2-SNAPSHOT -proto-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.2-SNAPSHOT 
-proto-google-cloud-bigquerystorage-v1:2.39.1:2.39.2-SNAPSHOT +google-cloud-bigquerystorage:2.40.0:2.40.0 +grpc-google-cloud-bigquerystorage-v1beta1:0.164.0:0.164.0 +grpc-google-cloud-bigquerystorage-v1beta2:0.164.0:0.164.0 +grpc-google-cloud-bigquerystorage-v1:2.40.0:2.40.0 +proto-google-cloud-bigquerystorage-v1beta1:0.164.0:0.164.0 +proto-google-cloud-bigquerystorage-v1beta2:0.164.0:0.164.0 +proto-google-cloud-bigquerystorage-v1:2.40.0:2.40.0

Range and bits for date/time fields:

| Field | Range     | #Bits |
|-------|-----------|-------|
| Year  | [1, 9999] | 14    |
| Month | [1, 12]   | 4     |
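Only the Year and Month rows of the table are reproduced here. As an illustration of the kind of fixed-width bit packing the table describes, below is a minimal, hypothetical Java sketch that packs a year (14 bits, range [1, 9999]) and a month (4 bits, range [1, 12]) into a single `int`. The class and method names (`DateTimeBitPacker`, `packYearMonth`, etc.) and the choice to place the year in the most significant bits are assumptions made for this example only; they are not the library's actual encoder.

```java
// Hypothetical sketch of the bit packing described by the table above.
// Field widths (Year: 14 bits, Month: 4 bits) come from the table; the
// packing order (year in the high bits, month in the low bits) is assumed.
public final class DateTimeBitPacker {

  /** Packs a year/month pair into a single integer laid out as [year:14][month:4]. */
  static int packYearMonth(int year, int month) {
    if (year < 1 || year > 9999) {
      throw new IllegalArgumentException("year out of range [1, 9999]: " + year);
    }
    if (month < 1 || month > 12) {
      throw new IllegalArgumentException("month out of range [1, 12]: " + month);
    }
    return (year << 4) | month;
  }

  /** Extracts the year from a packed value (drops the low 4 month bits). */
  static int unpackYear(int packed) {
    return packed >>> 4;
  }

  /** Extracts the month from a packed value (keeps only the low 4 bits). */
  static int unpackMonth(int packed) {
    return packed & 0xF;
  }

  public static void main(String[] args) {
    int packed = packYearMonth(2023, 7);
    System.out.println(unpackYear(packed) + "-" + unpackMonth(packed)); // prints 2023-7
  }
}
```

A round trip through `packYearMonth` and the two unpack helpers recovers the original values, which is the essential property of this style of fixed-width encoding.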