diff --git a/.mvn/maven.config b/.mvn/maven.config index 92af3731b..54bc6dc85 100644 --- a/.mvn/maven.config +++ b/.mvn/maven.config @@ -1,2 +1,2 @@ -Dscala.compat.version=2.12 --Dscala.compat.library.version=2.12.19 +-Dscala.compat.library.version=2.12.20 diff --git a/columnar-fit-performer-shared/pom.xml b/columnar-fit-performer-shared/pom.xml index 3c7a9280d..c7b927afb 100644 --- a/columnar-fit-performer-shared/pom.xml +++ b/columnar-fit-performer-shared/pom.xml @@ -7,7 +7,7 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 columnar-fit-performer-shared diff --git a/columnar-fit-performer-shared/src/main/java/com/couchbase/columnar/fit/core/util/ResultUtil.java b/columnar-fit-performer-shared/src/main/java/com/couchbase/columnar/fit/core/util/ResultUtil.java deleted file mode 100644 index 5d783b7a1..000000000 --- a/columnar-fit-performer-shared/src/main/java/com/couchbase/columnar/fit/core/util/ResultUtil.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2024 Couchbase, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.couchbase.columnar.fit.core.util; - -import fit.columnar.EmptyResultOrFailureResponse; -import fit.columnar.ResponseMetadata; - -public class ResultUtil { - public static EmptyResultOrFailureResponse success(StartTimes startTime) { - return fit.columnar.EmptyResultOrFailureResponse.newBuilder() - .setEmptySuccess(true) - .setMetadata(responseMetadata(startTime)) - .build(); - } - - public static EmptyResultOrFailureResponse failure(Throwable err, StartTimes startTime) { - return fit.columnar.EmptyResultOrFailureResponse.newBuilder() - .setMetadata(responseMetadata(startTime)) - .setError(ErrorUtil.convertError(err)) - .build(); - } - - public static ResponseMetadata responseMetadata(StartTimes startTime) { - if (startTime != null) { - return fit.columnar.ResponseMetadata.newBuilder() - .setElapsedNanos(System.nanoTime() - startTime.asSystem()) - .setInitiated(startTime.asWallclock()) - .build(); - } - else { - // todo remove when fix timings - return fit.columnar.ResponseMetadata.newBuilder().build(); - } - } -} diff --git a/columnar-java-client/examples/maven-project-template/pom.xml b/columnar-java-client/examples/maven-project-template/pom.xml index 0122fcf95..0856d4388 100644 --- a/columnar-java-client/examples/maven-project-template/pom.xml +++ b/columnar-java-client/examples/maven-project-template/pom.xml @@ -6,7 +6,7 @@ com.example couchbase-columnar-java-example - 1.0.0-SNAPSHOT + 1.0.6 Couchbase Columnar Java SDK Project Template Examples project for Couchbase Columnar Java SDK @@ -38,7 +38,17 @@ com.couchbase.client couchbase-columnar-java-client - 0.1.0-SNAPSHOT + 1.0.0 + + + + + org.jetbrains + annotations + 24.1.0 + provided @@ -53,7 +63,8 @@ - + org.apache.maven.plugins maven-checkstyle-plugin diff --git a/columnar-java-client/examples/maven-project-template/src/main/java/com/example/couchbase/columnar/Example.java 
b/columnar-java-client/examples/maven-project-template/src/main/java/com/example/couchbase/columnar/Example.java index 9e1e56e15..afbb5340c 100644 --- a/columnar-java-client/examples/maven-project-template/src/main/java/com/example/couchbase/columnar/Example.java +++ b/columnar-java-client/examples/maven-project-template/src/main/java/com/example/couchbase/columnar/Example.java @@ -19,8 +19,8 @@ import com.couchbase.columnar.client.java.Cluster; import com.couchbase.columnar.client.java.Credential; import com.couchbase.columnar.client.java.QueryResult; -import com.couchbase.columnar.client.java.internal.Certificates; +import java.time.Duration; import java.util.List; public class Example { @@ -32,10 +32,10 @@ public static void main(String[] args) { try (Cluster cluster = Cluster.newInstance( connectionString, Credential.of(username, password), + // The third parameter is optional. + // This example sets the default query timeout to 2 minutes. clusterOptions -> clusterOptions - // Configure a secure connection to Couchbase internal pre-production cluster. - // (Not required when connecting to a production cluster!) - .security(it -> it.trustOnlyCertificates(Certificates.getNonProdCertificates())) + .timeout(it -> it.queryTimeout(Duration.ofMinutes(2))) )) { // Buffered query. All rows must fit in memory. diff --git a/columnar-java-client/pom.xml b/columnar-java-client/pom.xml index a9a9f45fc..60a02e2a9 100644 --- a/columnar-java-client/pom.xml +++ b/columnar-java-client/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 couchbase-columnar-java-client - 0.1.0 + 1.0.6 Couchbase Columnar Java SDK diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Cluster.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Cluster.java index 9be118489..2c40c11ee 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Cluster.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Cluster.java @@ -21,19 +21,23 @@ import com.couchbase.client.core.env.Authenticator; import com.couchbase.client.core.env.BuilderPropertySetter; import com.couchbase.client.core.env.CoreEnvironment; +import com.couchbase.client.core.env.InvalidPropertyException; import com.couchbase.client.core.transaction.atr.ActiveTransactionRecordIds; import com.couchbase.client.core.transaction.config.CoreTransactionsCleanupConfig; import com.couchbase.client.core.transaction.config.CoreTransactionsConfig; import com.couchbase.client.core.transaction.forwards.CoreTransactionsSupportedExtensions; import com.couchbase.client.core.util.ConnectionString; +import com.couchbase.columnar.client.java.internal.Certificates; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import reactor.core.publisher.Mono; import javax.net.ssl.TrustManagerFactory; import java.io.Closeable; import java.time.Duration; import java.util.Collections; -import java.util.Map; +import java.util.LinkedHashMap; import java.util.Optional; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; @@ -43,7 +47,6 @@ import static com.couchbase.client.core.transaction.config.CoreTransactionsConfig.DEFAULT_TRANSACTION_TIMEOUT; import static java.util.Collections.emptySet; import static java.util.Objects.requireNonNull; -import static java.util.stream.Collectors.toMap; /** * Create a new instance by calling {@link #newInstance}. 
@@ -65,7 +68,13 @@ * .deserializer(new JacksonDeserializer(new ObjectMapper())) * ); * + * For best efficiency, create a single {@code Cluster} instance + * per Columnar cluster and share it throughout your application. + *

+ * When you're done interacting with the cluster, it's important to call + * {@link Cluster#close()} to release resources used by the cluster. */ +@ThreadSafe public final class Cluster implements Closeable, Queryable { private final Environment environment; private final CoreCouchbaseOps couchbaseOps; @@ -105,9 +114,21 @@ public static Cluster newInstance( /** * Returns a new instance, with options customized by the {@code optionsCustomizer} callback. + *

+ * Example usage: + *

+   * Cluster cluster = Cluster.newInstance(
+   *     connectionString,
+   *     Credential.of(username, password),
+   *     options -> options
+   *         .timeout(it -> it.queryTimeout(Duration.ofMinutes(5)))
+   *         .deserializer(new JacksonDeserializer(new ObjectMapper()))
+   * );
+   * 
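+   * Options can also be supplied as lower_snake_case parameters in the connection
+   * string itself; for example (an illustrative sketch, the host and values are placeholders):
+   * <pre>
+   * couchbases://127.0.0.1?security.disable_server_certificate_verification=true&srv=0
+   * </pre>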
* * @see Credential#of(String, String) * @see #newInstance(String, Credential) + * @see ClusterOptions */ public static Cluster newInstance( String connectionString, @@ -120,21 +141,22 @@ public static Cluster newInstance( throw new IllegalArgumentException("Invalid connection string; must start with secure scheme \"couchbases://\" (note the final 's') but got: " + redactUser(cs.original())); } + checkParameterNamesAreLowercase(cs); + ClusterOptions builder = new ClusterOptions(); optionsCustomizer.accept(builder); - BuilderPropertySetter propertySetter = new BuilderPropertySetter("", Collections.emptyMap()); - propertySetter.set(builder, cs.params()); - - // do we really want to allow a system property to disable server certificate verification? - //propertySetter.set(builder, systemPropertyMap(SYSTEM_PROPERTY_PREFIX)); + applyConnectionStringParameters(builder, cs); ClusterOptions.Unmodifiable opts = builder.build(); Environment.Builder envBuilder = new Environment.Builder() .transactionsConfig(disableTransactionsCleanup()) .deserializer(opts.deserializer()) - .ioConfig(it -> it.maxHttpConnections(Integer.MAX_VALUE)) + .ioConfig(it -> it + .enableDnsSrv(opts.srv()) + .maxHttpConnections(Integer.MAX_VALUE) + ) .securityConfig(it -> { SecurityOptions.Unmodifiable security = opts.security(); @@ -171,6 +193,96 @@ public static Cluster newInstance( return new Cluster(cs, credential.toInternalAuthenticator(), env); } + private static void applyConnectionStringParameters(ClusterOptions builder, ConnectionString cs) { + // Make a mutable copy so we can remove entries that require special handling. + LinkedHashMap params = new LinkedHashMap<>(cs.params()); + + // "security.trust_only_non_prod" is special; it doesn't have a corresponding programmatic + // config option. It's not a secret, but we don't want to confuse external users with a + // security config option they never need to set. + boolean trustOnlyNonProdCertificates = lastTrustParamIsNonProd(params); + + try { + BuilderPropertySetter propertySetter = new BuilderPropertySetter("", Collections.emptyMap(), Cluster::lowerSnakeCaseToLowerCamelCase); + propertySetter.set(builder, params); + + } catch (InvalidPropertyException e) { + // Translate core-io exception (internal API) to platform exception! + throw new IllegalArgumentException(e.getMessage(), e.getCause()); + } + + // Do this last, after any other "trust_only_*" params are validated and applied. + // Otherwise, the earlier params would clobber the config set by this param. + // (There's no compelling use case for including multiple "trust_only_*" params in + // the connection string, but we behave consistently if someone tries it.) + if (trustOnlyNonProdCertificates) { + builder.security(it -> it.trustOnlyCertificates(Certificates.getNonProdCertificates())); + } + } + + /** + * Returns true if the "security.trust_only_non_prod" connection string param is + * present, and no other trust params appear after it (since last one wins). + *
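+ * For example (sketch; the host is a placeholder), the param accepts 'true', '1', or an empty value:
+ * <pre>
+ * couchbases://example.com?security.trust_only_non_prod=true
+ * </pre>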

+ * Side effect: Removes that param from the map. + * + * @throws IllegalArgumentException if the param has an invalid value + */ + private static boolean lastTrustParamIsNonProd(LinkedHashMap params) { + final String TRUST_ONLY_NON_PROD_PARAM = "security.trust_only_non_prod"; + + // Last trust param wins, so check whether "trust only non-prod" was last trust param. + boolean trustOnlyNonProdWasLast = params.keySet().stream() + .filter(it -> it.startsWith("security.trust_")) + .reduce((a, b) -> b) // last + .orElse("") + .equals(TRUST_ONLY_NON_PROD_PARAM); + + // Always remove it, so later processing doesn't treat it as unrecognized param. + String trustOnlyNonProdValue = params.remove(TRUST_ONLY_NON_PROD_PARAM); + + // Always validate if present, regardless of whether it was last. + if (trustOnlyNonProdValue != null && !Set.of("", "true", "1").contains(trustOnlyNonProdValue)) { + throw new IllegalArgumentException("Invalid value for connection string property '" + TRUST_ONLY_NON_PROD_PARAM + "'; expected 'true', '1', or empty string, but got: '" + trustOnlyNonProdValue + "'"); + } + + return trustOnlyNonProdWasLast; + } + + private static void checkParameterNamesAreLowercase(ConnectionString cs) { + cs.params().keySet().stream() + .filter(Cluster::hasUppercase) + .findFirst() + .ifPresent(badName -> { + throw new IllegalArgumentException("Invalid connection string parameter '" + badName + "'. Please use lower_snake_case in connection string parameter names."); + }); + } + + private static boolean hasUppercase(String s) { + return s.codePoints().anyMatch(Character::isUpperCase); + } + + private static String lowerSnakeCaseToLowerCamelCase(String s) { + StringBuilder sb = new StringBuilder(); + int[] codePoints = s.codePoints().toArray(); + + boolean prevWasUnderscore = false; + for (int i : codePoints) { + if (i == '_') { + prevWasUnderscore = true; + continue; + } + + if (prevWasUnderscore) { + i = Character.toUpperCase(i); + } + sb.appendCodePoint(i); + prevWasUnderscore = false; + } + + return sb.toString(); + } + private static CoreTransactionsConfig disableTransactionsCleanup() { return new CoreTransactionsConfig( DEFAULT_TRANSACTION_DURABILITY_LEVEL, @@ -186,21 +298,6 @@ private static CoreTransactionsConfig disableTransactionsCleanup() { ); } - private static final String SYSTEM_PROPERTY_PREFIX = "com.couchbase.columnar.env."; - - /** - * Returns a map of all system properties whose names start with the given prefix, - * transformed to remove the prefix. - */ - private static Map systemPropertyMap(String prefix) { - return System.getProperties() - .entrySet() - .stream() - .filter(entry -> entry.getKey() instanceof String && entry.getValue() instanceof String) - .filter(entry -> ((String) entry.getKey()).startsWith(prefix)) - .collect(toMap(e -> ((String) e.getKey()).substring(prefix.length()), e -> (String) e.getValue())); - } - /** * @see #newInstance */ @@ -218,11 +315,11 @@ private Cluster( this.queryExecutor = new QueryExecutor(core, environment, connectionString); } + /** + * Releases resources and prevents further use of this object. 
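+   * For example (sketch; connectionString, username, and password are placeholders),
+   * try-with-resources closes the cluster automatically, since {@code Cluster} implements {@link Closeable}:
+   * <pre>
+   * try (Cluster cluster = Cluster.newInstance(connectionString, Credential.of(username, password))) {
+   *   // ... execute queries ...
+   * }
+   * </pre>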
+ */ public void close() { - close(environment.timeoutConfig().disconnectTimeout()); - } - - public void close(Duration timeout) { + Duration timeout = environment.timeoutConfig().disconnectTimeout(); disconnectInternal(disconnected, timeout, couchbaseOps, environment).block(); } @@ -237,6 +334,15 @@ static Mono disconnectInternal( .then(Mono.fromRunnable(() -> disconnected.set(true))); } + /** + * Returns the database in this cluster with the given name. + *

+ * A database is a container for {@link Scope}s. + *

+ * If the database does not exist, this method still returns a + * non-null object, but operations using that object fail with + * an exception indicating the database does not exist. + */ public Database database(String name) { return new Database(this, name); } diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/ClusterOptions.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/ClusterOptions.java index 52c6e0151..d4e3df530 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/ClusterOptions.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/ClusterOptions.java @@ -22,7 +22,13 @@ import java.util.function.Consumer; +/** + * A mutable builder for configuring the cluster's behavior. + * + * @see Cluster#newInstance(String, Credential, Consumer) + */ public final class ClusterOptions { + boolean srv = true; @Nullable Deserializer deserializer; final SecurityOptions security = new SecurityOptions(); final TimeoutOptions timeout = new TimeoutOptions(); @@ -34,6 +40,15 @@ Unmodifiable build() { return new Unmodifiable(this); } + /** + * Specifies whether the SDK should treat the connection string address + * as a DNS SRV record. Defaults to true. + */ + public ClusterOptions srv(boolean useDnsSrv) { + this.srv = useDnsSrv; + return this; + } + /** * Sets the default deserializer for converting query result rows into Java objects. *
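+ * For example, one possible configuration (a sketch using the Jackson-based deserializer):
+ * <pre>
+ * clusterOptions -> clusterOptions
+ *     .deserializer(new JacksonDeserializer(new ObjectMapper()))
+ * </pre>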

@@ -65,10 +80,12 @@ static class Unmodifiable { private final TimeoutOptions.Unmodifiable timeout; private final SecurityOptions.Unmodifiable security; private final Deserializer deserializer; + private final boolean srv; Unmodifiable(ClusterOptions builder) { this.timeout = builder.timeout.build(); this.security = builder.security.build(); + this.srv = builder.srv; this.deserializer = builder.deserializer != null ? builder.deserializer @@ -87,12 +104,17 @@ public Deserializer deserializer() { return deserializer; } + public boolean srv() { + return srv; + } + @Override public String toString() { return "ClusterOptions{" + "timeout=" + timeout + ", security=" + security + ", deserializer=" + deserializer + + ", srv=" + srv + '}'; } diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Credential.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Credential.java index 7be3b46a0..bf7400451 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Credential.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Credential.java @@ -20,6 +20,7 @@ import com.couchbase.client.core.env.Authenticator; import com.couchbase.client.core.env.PasswordAuthenticator; import com.couchbase.columnar.client.java.internal.DynamicAuthenticator; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import java.util.function.Supplier; @@ -34,6 +35,7 @@ * For advanced use cases involving dynamic credentials, see * {@link Credential#ofDynamic(Supplier)}. */ +@ThreadSafe public abstract class Credential { /** diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Database.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Database.java index 12dad16e7..c108ce2e5 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Database.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Database.java @@ -16,8 +16,14 @@ package com.couchbase.columnar.client.java; +import com.couchbase.columnar.client.java.internal.ThreadSafe; + import static java.util.Objects.requireNonNull; +/** + * Contains {@link Scope}s. + */ +@ThreadSafe public final class Database { private final Cluster cluster; private final String name; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetadata.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetadata.java index cd387032a..2e88b172c 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetadata.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetadata.java @@ -19,6 +19,7 @@ import com.couchbase.client.core.error.ErrorCodeAndMessage; import com.couchbase.client.core.msg.analytics.AnalyticsChunkHeader; import com.couchbase.client.core.msg.analytics.AnalyticsChunkTrailer; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import java.util.Collections; import java.util.List; @@ -27,6 +28,7 @@ /** * Holds associated metadata returned by the server. 
*/ +@ThreadSafe public final class QueryMetadata { private final AnalyticsChunkHeader header; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetrics.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetrics.java index dbd853a41..2e5ac1853 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetrics.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryMetrics.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.json.Mapper; import com.couchbase.client.core.util.Golang; import com.couchbase.columnar.client.java.internal.JacksonTransformers; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import java.io.IOException; import java.time.Duration; @@ -31,6 +32,7 @@ /** * Holds the metrics as returned from an analytics response. */ +@ThreadSafe public final class QueryMetrics { /** diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryResult.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryResult.java index 5b6826366..8d52d4f57 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryResult.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryResult.java @@ -18,11 +18,13 @@ import com.couchbase.client.core.msg.analytics.AnalyticsChunkHeader; import com.couchbase.client.core.msg.analytics.AnalyticsChunkTrailer; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import java.util.List; import static com.couchbase.client.core.util.CbCollections.listCopyOf; +@ThreadSafe(caveat = "Unless you modify the byte array returned by Row.bytes()") public final class QueryResult { private final List rows; private final QueryMetadata metadata; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryWarning.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryWarning.java index 0a7d94f15..ae3f9fe0c 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryWarning.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/QueryWarning.java @@ -17,6 +17,7 @@ package com.couchbase.columnar.client.java; import com.couchbase.client.core.error.ErrorCodeAndMessage; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import static java.util.Objects.requireNonNull; @@ -25,6 +26,7 @@ *

* Note that warnings are not terminal errors, but hints from the engine that something went not as expected. */ +@ThreadSafe public final class QueryWarning { private final ErrorCodeAndMessage inner; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Queryable.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Queryable.java index ebb1e85f9..d4b478b30 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Queryable.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Queryable.java @@ -16,9 +16,12 @@ package com.couchbase.columnar.client.java; +import com.couchbase.columnar.client.java.internal.ThreadSafe; + import java.util.concurrent.CancellationException; import java.util.function.Consumer; +@ThreadSafe public interface Queryable { /** diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Row.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Row.java index 3ed7620b6..6e43ec2ec 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Row.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Row.java @@ -19,6 +19,7 @@ import com.couchbase.columnar.client.java.codec.Deserializer; import com.couchbase.columnar.client.java.codec.TypeRef; import com.couchbase.columnar.client.java.internal.InternalJacksonSerDes; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import com.couchbase.columnar.client.java.json.JsonArray; import com.couchbase.columnar.client.java.json.JsonObject; import com.couchbase.columnar.client.java.json.JsonValue; @@ -61,6 +62,7 @@ * @see ClusterOptions#deserializer(Deserializer) * @see QueryOptions#deserializer(Deserializer) */ +@ThreadSafe(caveat = "Unless you modify the byte array returned by Row.bytes()") public final class Row { private final byte[] content; private final Deserializer deserializer; @@ -76,6 +78,8 @@ public final class Row { /** * Returns the raw content of the row, exactly as it was received from * the server. + *

+ * This method returns the same array each time it is called. */ public byte[] bytes() { return content; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Scope.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Scope.java index 34c8cd05f..cf481004b 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Scope.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/Scope.java @@ -18,11 +18,13 @@ import com.couchbase.client.core.api.manager.CoreBucketAndScope; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import java.util.function.Consumer; import static java.util.Objects.requireNonNull; +@ThreadSafe public final class Scope implements Queryable { private final Cluster cluster; private final Database database; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/SecurityOptions.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/SecurityOptions.java index 648afd4aa..3a2d0a9a1 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/SecurityOptions.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/SecurityOptions.java @@ -36,7 +36,7 @@ public final class SecurityOptions { private List cipherSuites = emptyList(); @Nullable private TrustSource trustSource = null; - private boolean verifyServerCertificate = true; + private boolean disableServerCertificateVerification = false; Unmodifiable build() { return new Unmodifiable(this); @@ -107,7 +107,7 @@ public SecurityOptions trustOnlyFactory(TrustManagerFactory factory) { /** * Server certification verification is enabled by default. - * You can disable it by passing false to this method, + * You can disable it by passing true to this method, * but you almost certainly shouldn't. Instead, call one of the * {@code trust} methods to tell the SDK which certificates * it should trust. @@ -116,7 +116,7 @@ public SecurityOptions trustOnlyFactory(TrustManagerFactory factory) { * because it exposes you to on-path attacks. Never do this in production. * In fact, you probably shouldn't do it anywhere. * - * @param verify If false, the SDK does not verify the certificate + * @param disable If true, the SDK does not verify the certificate * presented by the server. * @see #trustOnlyPemFile(Path) * @see #trustOnlyPemString(String) @@ -126,8 +126,8 @@ public SecurityOptions trustOnlyFactory(TrustManagerFactory factory) { * is almost always a bad idea. */ @Deprecated - public SecurityOptions verifyServerCertificate(boolean verify) { - this.verifyServerCertificate = verify; + public SecurityOptions disableServerCertificateVerification(boolean disable) { + this.disableServerCertificateVerification = disable; return this; } @@ -142,9 +142,9 @@ static class Unmodifiable { Unmodifiable(SecurityOptions builder) { this.cipherSuites = builder.cipherSuites; - this.trustSource = builder.verifyServerCertificate - ? (builder.trustSource != null ? builder.trustSource : TrustSource.from(Certificates.getCapellaCertificates())) - : TrustSource.insecure(); + this.trustSource = builder.disableServerCertificateVerification + ? TrustSource.insecure() + : (builder.trustSource != null ? 
builder.trustSource : TrustSource.from(Certificates.getCapellaCertificates())); } public List cipherSuites() { diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/Deserializer.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/Deserializer.java index a047b787f..a879875b4 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/Deserializer.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/Deserializer.java @@ -16,6 +16,7 @@ package com.couchbase.columnar.client.java.codec; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import reactor.util.annotation.Nullable; import java.io.IOException; @@ -23,6 +24,7 @@ /** * Converts query result rows into Java objects. */ +@ThreadSafe public interface Deserializer { /** * Deserializes raw input into the target class. diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/JacksonDeserializer.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/JacksonDeserializer.java index 8b6e929d6..57900dac7 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/JacksonDeserializer.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/JacksonDeserializer.java @@ -18,6 +18,9 @@ // CHECKSTYLE:OFF IllegalImport - Allow unbundled Jackson +import com.couchbase.columnar.client.java.ClusterOptions; +import com.couchbase.columnar.client.java.QueryOptions; +import com.couchbase.columnar.client.java.internal.ThreadSafe; import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.ObjectMapper; @@ -35,7 +38,11 @@ * .build(); * var deserializer = new JacksonDeserializer(mapper); * + * + * @see ClusterOptions#deserializer(Deserializer) + * @see QueryOptions#deserializer(Deserializer) */ +@ThreadSafe public final class JacksonDeserializer implements Deserializer { private final ObjectMapper mapper; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/TypeRef.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/TypeRef.java index 6d2ec0c7c..7716b98d1 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/TypeRef.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/TypeRef.java @@ -16,6 +16,8 @@ package com.couchbase.columnar.client.java.codec; +import com.couchbase.columnar.client.java.internal.ThreadSafe; + import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; @@ -34,6 +36,7 @@ * * Super Type Tokens. */ +@ThreadSafe public abstract class TypeRef { private final Type type; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/package-info.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/package-info.java index 4ec46a559..5d794279d 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/package-info.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/codec/package-info.java @@ -14,6 +14,10 @@ * limitations under the License. */ +/** + * Classes that turn query result rows into Java objects + * when you call {@code row.as(Class)} or {@code row.as(TypeRef)}. 
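+ * For example (sketch; {@code row} is a query result row):
+ * <pre>{@code
+ * JsonObject content = row.as(JsonObject.class);
+ * List<String> strings = row.as(new TypeRef<List<String>>() {});
+ * }</pre>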
+ */ @NonNullApi package com.couchbase.columnar.client.java.codec; diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/internal/ThreadSafe.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/internal/ThreadSafe.java new file mode 100644 index 000000000..927d4a1e0 --- /dev/null +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/internal/ThreadSafe.java @@ -0,0 +1,43 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.columnar.client.java.internal; + +import org.jetbrains.annotations.ApiStatus; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Documents that the annotated type is thread-safe. + *

+ * When applied to an interface or abstract class, + * documents that implementations and subclasses must be thread-safe. + *
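+ * For example:
+ * <pre>{@code
+ * @ThreadSafe(caveat = "Unless you modify the byte array returned by Row.bytes()")
+ * public final class QueryResult { ... }
+ * }</pre>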

+ * This annotation only provides information; nothing magical happens + * when it is present. The implementor of the annotated class/interface + * is responsible for ensuring it is actually thread-safe. + */ +@ApiStatus.Internal +@Retention(RetentionPolicy.SOURCE) +@Documented +@Target(ElementType.TYPE) +public @interface ThreadSafe { + String caveat() default ""; +} diff --git a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/package-info.java b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/package-info.java index 0954f8a5a..ec0440988 100644 --- a/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/package-info.java +++ b/columnar-java-client/src/main/java/com/couchbase/columnar/client/java/package-info.java @@ -14,6 +14,9 @@ * limitations under the License. */ +/** + * Start here by creating a {@link com.couchbase.columnar.client.java.Cluster}. + */ @NonNullApi package com.couchbase.columnar.client.java; diff --git a/columnar-java-client/src/test/java/com/couchbase/columnar/client/java/sandbox/Sandbox.java b/columnar-java-client/src/test/java/com/couchbase/columnar/client/java/sandbox/Sandbox.java index 73efa5a2c..eca5731c4 100644 --- a/columnar-java-client/src/test/java/com/couchbase/columnar/client/java/sandbox/Sandbox.java +++ b/columnar-java-client/src/test/java/com/couchbase/columnar/client/java/sandbox/Sandbox.java @@ -35,7 +35,7 @@ public class Sandbox { public static void main(String[] args) throws Exception { - String connectionString = "couchbases://127.0.0.1?security.verifyServerCertificate=false"; + String connectionString = "couchbases://127.0.0.1?security.disable_server_certificate_verification=true&srv=0"; String username = "Administrator"; String password = "password"; diff --git a/columnar-java-fit-performer/pom.xml b/columnar-java-fit-performer/pom.xml index 987e796a4..8d0b9a187 100644 --- a/columnar-java-fit-performer/pom.xml +++ b/columnar-java-fit-performer/pom.xml @@ -7,7 +7,7 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 columnar-java-fit-performer diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/cluster/ColumnarClusterConnection.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/cluster/ColumnarClusterConnection.java index 8061d410b..e1520e72b 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/cluster/ColumnarClusterConnection.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/cluster/ColumnarClusterConnection.java @@ -87,9 +87,8 @@ public ColumnarClusterConnection(fit.columnar.ClusterNewInstanceRequest request, if (secOptions.hasTrustOnlyPlatform()) { sec.trustOnlyJvm(); } - if (secOptions.hasVerifyServerCertificate()) { - //noinspection deprecation - sec.verifyServerCertificate(secOptions.getVerifyServerCertificate()); + if (secOptions.hasDisableServerCertificateVerification()) { + sec.disableServerCertificateVerification(secOptions.getDisableServerCertificateVerification()); } if (secOptions.getCipherSuitesCount() > 0) { sec.cipherSuites(secOptions.getCipherSuitesList()); diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/content/ContentAsUtil.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/content/ContentAsUtil.java index a36220268..57505d970 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/content/ContentAsUtil.java +++ 
b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/content/ContentAsUtil.java @@ -29,7 +29,8 @@ public static Try contentType( fit.columnar.ContentAs contentAs, Supplier asByteArray, Supplier asList, - Supplier asMap + Supplier asMap, + Supplier asString ) { try { if (contentAs.hasAsByteArray()) { @@ -44,6 +45,10 @@ public static Try contentType( return new Try<>(fit.columnar.ContentWas.newBuilder() .setContentWasMap(ProtobufConversions.jsonObjectToStruct(asMap.get())) .build()); + } else if (contentAs.hasAsString()) { + return new Try<>(fit.columnar.ContentWas.newBuilder() + .setContentWasString(asString.get()) + .build()); } else { throw new UnsupportedOperationException("Java performer cannot handle contentAs " + contentAs); } diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryOptionsUtil.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryOptionsUtil.java index dd9aa4a42..21511c46c 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryOptionsUtil.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryOptionsUtil.java @@ -19,6 +19,7 @@ import com.couchbase.columnar.client.java.QueryOptions; import com.couchbase.columnar.client.java.QueryPriority; import com.couchbase.columnar.client.java.ScanConsistency; +import com.couchbase.columnar.util.CustomDeserializer; import com.couchbase.columnar.util.grpc.ProtobufConversions; import reactor.util.annotation.Nullable; @@ -60,6 +61,10 @@ public class QueryOptionsUtil { if (opts.hasTimeout()) { options.timeout(Duration.ofSeconds(opts.getTimeout().getSeconds())); } + if (opts.hasDeserializer() && opts.getDeserializer().hasCustom()) { + CustomDeserializer customDeserializer = new CustomDeserializer(); + options.deserializer(customDeserializer); + } }; } } diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultBufferedStreamer.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultBufferedStreamer.java index 78d77cdd7..dae3663ae 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultBufferedStreamer.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultBufferedStreamer.java @@ -19,7 +19,7 @@ import com.couchbase.columnar.client.java.QueryResult; import com.couchbase.columnar.client.java.Queryable; import com.couchbase.columnar.client.java.Row; -import com.couchbase.columnar.fit.core.util.ResultUtil; +import com.couchbase.columnar.util.ResultUtil; import fit.columnar.EmptyResultOrFailureResponse; import fit.columnar.ExecuteQueryRequest; import fit.columnar.QueryResultMetadataResponse; diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultPushBasedStreamer.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultPushBasedStreamer.java index 477f355c3..d868808df 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultPushBasedStreamer.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryResultPushBasedStreamer.java @@ -23,7 +23,7 @@ import com.couchbase.columnar.client.java.json.JsonArray; import com.couchbase.columnar.client.java.json.JsonObject; import com.couchbase.columnar.content.ContentAsUtil; -import com.couchbase.columnar.fit.core.util.ErrorUtil; +import com.couchbase.columnar.util.ErrorUtil; import 
fit.columnar.EmptyResultOrFailureResponse; import fit.columnar.ExecuteQueryRequest; import fit.columnar.QueryResultMetadataResponse; @@ -152,10 +152,13 @@ private void handleRow(Row row) { private RowProcessingResult processRow(Row row) { if (executeQueryRequest.hasContentAs()) { - var content = ContentAsUtil.contentType(executeQueryRequest.getContentAs(), + var content = ContentAsUtil.contentType( + executeQueryRequest.getContentAs(), () -> row.bytes(), () -> row.asNullable(JsonArray.class), - () -> row.asNullable(JsonObject.class)); + () -> row.asNullable(JsonObject.class), + () -> row.asNullable(String.class) + ); if (content.isSuccess()) { return new RowProcessingResult(null, fit.columnar.QueryRowResponse.newBuilder() diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryRowUtil.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryRowUtil.java index 9f083c862..8932016d3 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryRowUtil.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/query/QueryRowUtil.java @@ -20,7 +20,7 @@ import com.couchbase.columnar.client.java.json.JsonArray; import com.couchbase.columnar.client.java.json.JsonObject; import com.couchbase.columnar.content.ContentAsUtil; -import com.couchbase.columnar.fit.core.util.ErrorUtil; +import com.couchbase.columnar.util.ErrorUtil; import fit.columnar.QueryRowResponse; import javax.annotation.Nullable; @@ -36,7 +36,8 @@ public static RowProcessingResult processRow(fit.columnar.ExecuteQueryRequest ex executeQueryRequest.getContentAs(), () -> row.bytes(), () -> row.asNullable(JsonArray.class), - () -> row.asNullable(JsonObject.class) + () -> row.asNullable(JsonObject.class), + () -> row.asNullable(String.class) ); if (content.isSuccess()) { diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarCrossService.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarCrossService.java index 1336aa867..87aebf4bf 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarCrossService.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarCrossService.java @@ -21,8 +21,8 @@ import com.couchbase.columnar.query.ExecuteQueryStreamer; import com.couchbase.columnar.query.QueryResultBufferedStreamer; import com.couchbase.columnar.query.QueryResultPushBasedStreamer; -import com.couchbase.columnar.fit.core.util.ErrorUtil; -import com.couchbase.columnar.fit.core.util.ResultUtil; +import com.couchbase.columnar.util.ErrorUtil; +import com.couchbase.columnar.util.ResultUtil; import fit.columnar.CloseAllQueryResultsRequest; import fit.columnar.CloseQueryResultRequest; import fit.columnar.ColumnarCrossServiceGrpc; diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarService.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarService.java index 255ee38ba..1b5c316dc 100644 --- a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarService.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/rpc/JavaColumnarService.java @@ -19,15 +19,16 @@ import com.couchbase.client.protocol.shared.EchoRequest; import com.couchbase.client.protocol.shared.EchoResponse; import com.couchbase.columnar.cluster.ColumnarClusterConnection; -import com.couchbase.columnar.modes.Mode; import 
com.couchbase.columnar.fit.core.exceptions.ExceptionGrpcMappingUtil; -import com.couchbase.columnar.fit.core.util.ResultUtil; import com.couchbase.columnar.fit.core.util.VersionUtil; +import com.couchbase.columnar.modes.Mode; +import com.couchbase.columnar.util.ResultUtil; import fit.columnar.CloseAllColumnarClustersRequest; import fit.columnar.ClusterCloseRequest; import fit.columnar.ClusterNewInstanceRequest; import fit.columnar.ColumnarServiceGrpc; import fit.columnar.EmptyResultOrFailureResponse; +import fit.columnar.SdkConnectionError; import io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,23 +54,32 @@ public void fetchPerformerCaps(fit.columnar.FetchPerformerCapsRequest request, S builder.setSdkVersion(sdkVersion); } builder.setSdk(fit.columnar.SDK.SDK_JAVA); - builder.putClusterNewInstance(0, fit.columnar.PerApiElementGeneric.getDefaultInstance()); - builder.putClusterClose(0, fit.columnar.PerApiElementGeneric.getDefaultInstance()); + builder.putClusterNewInstance(0, fit.columnar.PerApiElementClusterNewInstance.getDefaultInstance()); + builder.putClusterClose(0, fit.columnar.PerApiElementClusterClose.getDefaultInstance()); // The SDK has two main modes: buffered and push-based streaming. var executeQueryBuffered = fit.columnar.PerApiElementExecuteQuery.newBuilder() .setExecuteQueryReturns(fit.columnar.PerApiElementExecuteQuery.ExecuteQueryReturns.EXECUTE_QUERY_RETURNS_QUERY_RESULT) .setRowIteration(fit.columnar.PerApiElementExecuteQuery.RowIteration.ROW_ITERATION_BUFFERED) .setRowDeserialization(fit.columnar.PerApiElementExecuteQuery.RowDeserialization.ROW_DESERIALIZATION_STATIC_ROW_TYPING_INDIVIDUAL) + .setSupportsCustomDeserializer(true) .build(); var executeQueryPushBased = fit.columnar.PerApiElementExecuteQuery.newBuilder() .setExecuteQueryReturns(fit.columnar.PerApiElementExecuteQuery.ExecuteQueryReturns.EXECUTE_QUERY_RETURNS_QUERY_METADATA) .setRowIteration(fit.columnar.PerApiElementExecuteQuery.RowIteration.ROW_ITERATION_STREAMING_PUSH_BASED) .setRowDeserialization(fit.columnar.PerApiElementExecuteQuery.RowDeserialization.ROW_DESERIALIZATION_STATIC_ROW_TYPING_INDIVIDUAL) + .setSupportsCustomDeserializer(true) .build(); builder.putClusterExecuteQuery(Mode.PUSH_BASED_STREAMING.ordinal(), executeQueryPushBased); builder.putClusterExecuteQuery(Mode.BUFFERED.ordinal(), executeQueryBuffered); builder.putScopeExecuteQuery(Mode.PUSH_BASED_STREAMING.ordinal(), executeQueryPushBased); builder.putScopeExecuteQuery(Mode.BUFFERED.ordinal(), executeQueryBuffered); + for (Mode mode : new Mode[]{Mode.PUSH_BASED_STREAMING, Mode.BUFFERED}) { + builder.putSdkConnectionError(mode.ordinal(), SdkConnectionError.newBuilder() + .setInvalidCredErrorType(SdkConnectionError.InvalidCredentialErrorType.AS_INVALID_CREDENTIAL_EXCEPTION) + .setBootstrapErrorType(SdkConnectionError.BootstrapErrorType.ERROR_AS_TIMEOUT_EXCEPTION) + .build() + ); + } responseObserver.onNext(builder.build()); responseObserver.onCompleted(); } catch (RuntimeException err) { diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/CustomDeserializer.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/CustomDeserializer.java new file mode 100644 index 000000000..2454fd1fb --- /dev/null +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/CustomDeserializer.java @@ -0,0 +1,41 @@ +package com.couchbase.columnar.util; + +import com.couchbase.columnar.client.java.codec.Deserializer; +import 
com.couchbase.columnar.client.java.codec.TypeRef; +import com.couchbase.columnar.client.java.json.JsonObject; + +/** + * CustomDeserializer provides a generic implementation of the Deserializer interface. + *

+ * This deserializer parses each row's raw bytes as a JSON object, adds a boolean flag
+ * ("Serialized": false) to the payload, and returns the result as a String. The flag makes
+ * it easy to verify that this custom deserializer was the one that processed the row.
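+ * For example, a row whose raw bytes are {"city": "Paris"} is returned (approximately) as
+ * the String {"city":"Paris","Serialized":false}.
+ *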

+ * Use Cases: + * - This deserializer can be used in scenarios where you need to deserialize + * objects while keeping track of their deserialization state. + *

+ * Limitations: + * - The current implementation assumes that the input objects can be deserialized into + * string format. Complex or non-standard objects may require additional handling. + * - The `deserialize` methods in this implementation modify the original JSON object + * by setting the `Serialized` flag to `false`, which might not be suitable for + * all use cases. + */ + +public class CustomDeserializer implements Deserializer { + @Override + public T deserialize(Class target, byte[] input) { + JsonObject obj = JsonObject.fromJson(input); + obj.put("Serialized", false); + return (T) obj.toString(); + } + + @Override + public T deserialize(TypeRef target, byte[] input) { + JsonObject obj = JsonObject.fromJson(input); + obj.put("Serialized", false); + return (T) obj.toString(); + } +} diff --git a/columnar-fit-performer-shared/src/main/java/com/couchbase/columnar/fit/core/util/ErrorUtil.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/ErrorUtil.java similarity index 51% rename from columnar-fit-performer-shared/src/main/java/com/couchbase/columnar/fit/core/util/ErrorUtil.java rename to columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/ErrorUtil.java index 4ab993939..166c9a061 100644 --- a/columnar-fit-performer-shared/src/main/java/com/couchbase/columnar/fit/core/util/ErrorUtil.java +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/ErrorUtil.java @@ -13,26 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.couchbase.columnar.fit.core.util; +package com.couchbase.columnar.util; -import fit.columnar.ColumnarErrorType; +import com.couchbase.columnar.client.java.InvalidCredentialException; +import com.couchbase.columnar.client.java.QueryException; +import com.couchbase.columnar.client.java.TimeoutException; import fit.columnar.PlatformErrorType; -import javax.annotation.Nullable; - public class ErrorUtil { private ErrorUtil() { throw new AssertionError("not instantiable"); } - private static @Nullable fit.columnar.ColumnarErrorType convertColumnarError(Throwable exception) { + private static boolean isColumnarError(Throwable exception) { String simpleName = exception.getClass().getSimpleName(); return switch (simpleName) { - case "QueryException" -> ColumnarErrorType.COLUMNAR_EXCEPTION_QUERY; - case "InvalidCredentialException" -> ColumnarErrorType.COLUMNAR_EXCEPTION_INVALID_CREDENTIAL; - case "TimeoutException" -> ColumnarErrorType.COLUMNAR_EXCEPTION_TIMEOUT; - default -> null; + case "QueryException", "InvalidCredentialException", "TimeoutException", "ColumnarException" -> true; + default -> false; }; } @@ -45,12 +43,29 @@ private static fit.columnar.PlatformErrorType convertPlatformError(Throwable exc public static fit.columnar.Error convertError(Throwable raw) { var ret = fit.columnar.Error.newBuilder(); - var type = ErrorUtil.convertColumnarError(raw); - - if (type != null) { + if (isColumnarError(raw)) { var out = fit.columnar.ColumnarError.newBuilder() - .setType(type) - .setAsString(raw.toString()); + .setAsString(raw.toString()); + + if (raw instanceof QueryException queryException) { + out.setSubException(fit.columnar.SubColumnarError.newBuilder().setQueryException( + fit.columnar.QueryException.newBuilder() + .setErrorCode(queryException.code()) + .setServerMessage(queryException.serverMessage()) + .build()) + .build()); + } + if (raw instanceof InvalidCredentialException) { + 
out.setSubException(fit.columnar.SubColumnarError.newBuilder().setInvalidCredentialException( + fit.columnar.InvalidCredentialException.newBuilder().build()) + .build()); + } + + if (raw instanceof TimeoutException) { + out.setSubException(fit.columnar.SubColumnarError.newBuilder().setTimeoutException(fit.columnar.TimeoutException.newBuilder().build()) + .build()); + } + if (raw.getCause() != null) { out.setCause(convertError(raw.getCause())); } @@ -58,8 +73,8 @@ public static fit.columnar.Error convertError(Throwable raw) { ret.setColumnar(out); } else { ret.setPlatform(fit.columnar.PlatformError.newBuilder() - .setType(convertPlatformError(raw)) - .setAsString(raw.toString())); + .setType(convertPlatformError(raw)) + .setAsString(raw.toString())); } return ret.build(); diff --git a/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/ResultUtil.java b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/ResultUtil.java new file mode 100644 index 000000000..0bfbb30fd --- /dev/null +++ b/columnar-java-fit-performer/src/main/java/com/couchbase/columnar/util/ResultUtil.java @@ -0,0 +1,34 @@ +package com.couchbase.columnar.util; + +import com.couchbase.columnar.fit.core.util.StartTimes; +import fit.columnar.EmptyResultOrFailureResponse; +import fit.columnar.ResponseMetadata; + +public class ResultUtil { + public static EmptyResultOrFailureResponse success(StartTimes startTime) { + return fit.columnar.EmptyResultOrFailureResponse.newBuilder() + .setEmptySuccess(true) + .setMetadata(responseMetadata(startTime)) + .build(); + } + + public static EmptyResultOrFailureResponse failure(Throwable err, StartTimes startTime) { + return fit.columnar.EmptyResultOrFailureResponse.newBuilder() + .setMetadata(responseMetadata(startTime)) + .setError(ErrorUtil.convertError(err)) + .build(); + } + + public static ResponseMetadata responseMetadata(StartTimes startTime) { + if (startTime != null) { + return fit.columnar.ResponseMetadata.newBuilder() + .setElapsedNanos(System.nanoTime() - startTime.asSystem()) + .setInitiated(startTime.asWallclock()) + .build(); + } + else { + // todo remove when fix timings + return fit.columnar.ResponseMetadata.newBuilder().build(); + } + } +} diff --git a/config/checkstyle/checkstyle-basic.xml b/config/checkstyle/checkstyle-basic.xml index 952ef6a7e..18b9ebcf2 100644 --- a/config/checkstyle/checkstyle-basic.xml +++ b/config/checkstyle/checkstyle-basic.xml @@ -16,6 +16,10 @@ + + + + 1.16.6 fit-performer-core diff --git a/core-fit-performer/src/main/java/com/couchbase/client/performer/core/CorePerformer.java b/core-fit-performer/src/main/java/com/couchbase/client/performer/core/CorePerformer.java index ee3fd614f..0cc3ad993 100644 --- a/core-fit-performer/src/main/java/com/couchbase/client/performer/core/CorePerformer.java +++ b/core-fit-performer/src/main/java/com/couchbase/client/performer/core/CorePerformer.java @@ -64,6 +64,7 @@ public void performerCapsFetch(PerformerCapsFetchRequest request, StreamObserver .addPerformerCaps(Caps.GRPC_TESTING) // Add any shared caps here that all 3 performers possess: .addPerformerCaps(Caps.KV_SUPPORT_1) + .addPerformerCaps(Caps.TXN_CLIENT_CONTEXT_ID_SUPPORT) .addSdkImplementationCaps(com.couchbase.client.protocol.sdk.Caps.WAIT_UNTIL_READY) .addSdkImplementationCaps(com.couchbase.client.protocol.sdk.Caps.PROTOSTELLAR) .addSdkImplementationCaps(com.couchbase.client.protocol.sdk.Caps.SDK_SEARCH_RFC_REVISION_11) diff --git a/core-io-deps/pom.xml b/core-io-deps/pom.xml index a0607e84a..5b50c4741 100644 --- 
a/core-io-deps/pom.xml +++ b/core-io-deps/pom.xml @@ -6,7 +6,7 @@ com.couchbase.client core-io-deps - 1.7.3 + 1.7.6 jar Couchbase JVM Core IO Dependencies @@ -19,7 +19,7 @@ UTF-8 - 4.1.112.Final + 4.1.115.Final 2.17.2 com.couchbase.client.core.deps. @@ -79,7 +79,7 @@ org.jctools jctools-core - 4.0.1 + 4.0.5 com.fasterxml.jackson.core diff --git a/core-io/pom.xml b/core-io/pom.xml index d5ca3814a..c3d40dcd7 100644 --- a/core-io/pom.xml +++ b/core-io/pom.xml @@ -7,11 +7,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 core-io - 3.7.3 + 3.7.6 diff --git a/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderIntegrationTest.java b/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderIntegrationTest.java index 1728058ee..3a94cb781 100644 --- a/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderIntegrationTest.java +++ b/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderIntegrationTest.java @@ -22,7 +22,7 @@ import com.couchbase.client.core.config.ProposedBucketConfigContext; import com.couchbase.client.core.diagnostics.ClusterState; import com.couchbase.client.core.env.CoreEnvironment; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.service.ServiceType; import com.couchbase.client.core.util.ConfigWaitHelper; import com.couchbase.client.core.util.CoreIntegrationTest; diff --git a/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/GlobalLoaderIntegrationTest.java b/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/GlobalLoaderIntegrationTest.java index 34c6475df..0b5c3724c 100644 --- a/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/GlobalLoaderIntegrationTest.java +++ b/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/GlobalLoaderIntegrationTest.java @@ -19,7 +19,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.config.ProposedGlobalConfigContext; import com.couchbase.client.core.env.CoreEnvironment; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.ConfigWaitHelper; import com.couchbase.client.core.util.CoreIntegrationTest; import com.couchbase.client.test.Capabilities; diff --git a/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderIntegrationTest.java b/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderIntegrationTest.java index 5ebd665e6..2c92f7924 100644 --- a/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderIntegrationTest.java +++ b/core-io/src/integrationTest/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderIntegrationTest.java @@ -19,7 +19,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.config.ProposedBucketConfigContext; import com.couchbase.client.core.env.CoreEnvironment; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.ConfigWaitHelper; import com.couchbase.client.core.util.CoreIntegrationTest; import com.couchbase.client.test.Services; diff --git 
a/core-io/src/integrationTest/java/com/couchbase/client/core/config/refresher/GlobalBucketRefresherIntegrationTest.java b/core-io/src/integrationTest/java/com/couchbase/client/core/config/refresher/GlobalBucketRefresherIntegrationTest.java index 11d2eebb8..c667e8eb8 100644 --- a/core-io/src/integrationTest/java/com/couchbase/client/core/config/refresher/GlobalBucketRefresherIntegrationTest.java +++ b/core-io/src/integrationTest/java/com/couchbase/client/core/config/refresher/GlobalBucketRefresherIntegrationTest.java @@ -22,7 +22,7 @@ import com.couchbase.client.core.config.ProposedGlobalConfigContext; import com.couchbase.client.core.config.loader.GlobalLoader; import com.couchbase.client.core.env.CoreEnvironment; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.CoreIntegrationTest; import com.couchbase.client.test.Capabilities; import com.couchbase.client.test.IgnoreWhen; diff --git a/core-io/src/integrationTest/resources/integration.properties b/core-io/src/integrationTest/resources/integration.properties index f151f3c17..6bb61ccea 100644 --- a/core-io/src/integrationTest/resources/integration.properties +++ b/core-io/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/core-io/src/main/java/com/couchbase/client/core/Core.java b/core-io/src/main/java/com/couchbase/client/core/Core.java index ece1848f8..cde50fa38 100644 --- a/core-io/src/main/java/com/couchbase/client/core/Core.java +++ b/core-io/src/main/java/com/couchbase/client/core/Core.java @@ -36,6 +36,7 @@ import com.couchbase.client.core.cnc.CbTracing; import com.couchbase.client.core.cnc.Event; import com.couchbase.client.core.cnc.EventBus; +import com.couchbase.client.core.cnc.RequestTracer; import com.couchbase.client.core.cnc.TracingIdentifiers; import com.couchbase.client.core.cnc.ValueRecorder; import com.couchbase.client.core.cnc.events.core.BucketClosedEvent; @@ -53,11 +54,9 @@ import com.couchbase.client.core.cnc.events.core.WatchdogRunFailedEvent; import com.couchbase.client.core.cnc.events.transaction.TransactionsStartedEvent; import com.couchbase.client.core.cnc.metrics.LoggingMeter; -import com.couchbase.client.core.config.BucketConfig; import com.couchbase.client.core.config.ClusterConfig; import com.couchbase.client.core.config.ConfigurationProvider; import com.couchbase.client.core.config.DefaultConfigurationProvider; -import com.couchbase.client.core.config.GlobalConfig; import com.couchbase.client.core.diagnostics.ClusterState; import com.couchbase.client.core.diagnostics.EndpointDiagnostics; import com.couchbase.client.core.diagnostics.InternalEndpointDiagnostics; @@ -65,12 +64,14 @@ import com.couchbase.client.core.endpoint.http.CoreHttpClient; import com.couchbase.client.core.env.Authenticator; import com.couchbase.client.core.env.CoreEnvironment; +import com.couchbase.client.core.env.RequestTracerDecorator; import com.couchbase.client.core.env.SeedNode; import com.couchbase.client.core.error.AlreadyShutdownException; import com.couchbase.client.core.error.ConfigException; import com.couchbase.client.core.error.GlobalConfigNotFoundException; import com.couchbase.client.core.error.InvalidArgumentException; import 
com.couchbase.client.core.error.RequestCanceledException; +import com.couchbase.client.core.error.UnambiguousTimeoutException; import com.couchbase.client.core.error.UnsupportedConfigMechanismException; import com.couchbase.client.core.io.CollectionIdentifier; import com.couchbase.client.core.manager.CoreBucketManagerOps; @@ -87,29 +88,37 @@ import com.couchbase.client.core.node.KeyValueLocator; import com.couchbase.client.core.node.Locator; import com.couchbase.client.core.node.Node; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.node.RoundRobinLocator; import com.couchbase.client.core.node.ViewLocator; import com.couchbase.client.core.service.ServiceScope; import com.couchbase.client.core.service.ServiceState; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.ClusterIdentifier; +import com.couchbase.client.core.topology.ClusterIdentifierUtil; +import com.couchbase.client.core.topology.ClusterTopology; +import com.couchbase.client.core.topology.ClusterTopologyWithBucket; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.transaction.cleanup.CoreTransactionsCleanup; import com.couchbase.client.core.transaction.components.CoreTransactionRequest; import com.couchbase.client.core.transaction.context.CoreTransactionsContext; import com.couchbase.client.core.util.ConnectionString; import com.couchbase.client.core.util.CoreIdGenerator; +import com.couchbase.client.core.util.Deadline; import com.couchbase.client.core.util.LatestStateSubscription; import com.couchbase.client.core.util.NanoTimestamp; import reactor.core.Disposable; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.util.annotation.Nullable; +import reactor.util.retry.Retry; import java.time.Duration; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.Optional; import java.util.Set; @@ -233,7 +242,8 @@ public class Core implements CoreCouchbaseOps, AutoCloseable { private final Disposable invalidStateWatchdog; /** - * Holds the response metrics per + * Holds the response metrics. + * Note that because tags have to be provided on ValueRecorder creation, every unique combination of tags needs to be represented in the ResponseMetricIdentifier key. */ private final Map responseMetrics = new ConcurrentHashMap<>(); @@ -243,6 +253,8 @@ public class Core implements CoreCouchbaseOps, AutoCloseable { private final ConnectionString connectionString; + private final CoreResources coreResources; + /** * @deprecated Please use {@link #create(CoreEnvironment, Authenticator, ConnectionString)} instead. */ @@ -280,6 +292,24 @@ protected Core( CoreLimiter.incrementAndVerifyNumInstances(environment.eventBus()); this.connectionString = requireNonNull(connectionString); + boolean ignoresAttributes = CbTracing.isInternalTracer(environment.requestTracer()); + RequestTracer requestTracerDecoratedIfRequired = ignoresAttributes + ? 
environment.requestTracer() + : new RequestTracerDecorator(environment.requestTracer(), () -> { + if (currentConfig == null) { + return null; + } + if (currentConfig.globalConfig() == null) { + return null; + } + return currentConfig.globalConfig().clusterIdent(); + }); + this.coreResources = new CoreResources() { + @Override + public RequestTracer requestTracer() { + return requestTracerDecoratedIfRequired; + } + }; this.coreContext = new CoreContext(this, CoreIdGenerator.nextId(), environment, authenticator); this.configurationProvider = createConfigurationProvider(); this.nodes = new CopyOnWriteArrayList<>(); @@ -319,7 +349,7 @@ protected Core( ); this.transactionsCleanup = new CoreTransactionsCleanup(this, environment.transactionsConfig()); - this.transactionsContext = new CoreTransactionsContext(environment.meter()); + this.transactionsContext = new CoreTransactionsContext(this, environment.meter()); context().environment().eventBus().publish(new TransactionsStartedEvent(environment.transactionsConfig().cleanupConfig().runLostAttemptsCleanupThread(), environment.transactionsConfig().cleanupConfig().runRegularAttemptsCleanupThread())); } @@ -525,6 +555,32 @@ private Mono closeBucket(final String name) { }); } + @Stability.Internal + public Mono waitForClusterTopology(Duration timeout) { + return Mono.defer(() -> { + Deadline deadline = Deadline.of(timeout); + + return Mono.fromCallable(() -> { + ClusterTopology globalTopology = clusterConfig().globalTopology(); + if (globalTopology != null) { + return globalTopology; + } + + for (ClusterTopologyWithBucket topology : clusterConfig().bucketTopologies()) { + return topology; + } + + throw deadline.exceeded() + ? new UnambiguousTimeoutException("Timed out while waiting for cluster topology", null) + : new NoSuchElementException(); // trigger retry! + }) + .retryWhen(Retry + .fixedDelay(Long.MAX_VALUE, Duration.ofMillis(100)) + .filter(t -> t instanceof NoSuchElementException) + ); + }); + } + /** * This method can be used by a caller to make sure a certain service is enabled at the given * target node. @@ -574,9 +630,10 @@ public ValueRecorder responseMetric(final Request request, @Nullable Throwabl } } final String finalExceptionSimpleName = exceptionSimpleName; + final ClusterIdentifier clusterIdent = ClusterIdentifierUtil.fromConfig(currentConfig); - return responseMetrics.computeIfAbsent(new ResponseMetricIdentifier(request, exceptionSimpleName), key -> { - Map tags = new HashMap<>(7); + return responseMetrics.computeIfAbsent(new ResponseMetricIdentifier(request, exceptionSimpleName, clusterIdent), key -> { + Map tags = new HashMap<>(9); if (key.serviceType == null) { // Virtual service if (request instanceof CoreTransactionRequest) { @@ -590,9 +647,21 @@ public ValueRecorder responseMetric(final Request request, @Nullable Throwabl // The LoggingMeter only uses the service and operation labels, so optimise this hot-path by skipping // assigning other labels. if (!isDefaultLoggingMeter) { - tags.put(TracingIdentifiers.ATTR_NAME, key.bucketName); - tags.put(TracingIdentifiers.ATTR_SCOPE, key.scopeName); - tags.put(TracingIdentifiers.ATTR_COLLECTION, key.collectionName); + // Crucial note for Micrometer: + // If we are ever going to output an attribute from a given JVM run then we must always + // output that attribute in this run. Specifying null as an attribute value allows the OTel backend to strip it, and + // the Micrometer backend to provide a default value. 
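Aside (not part of the diff): the new waitForClusterTopology helper polls the current ClusterConfig every 100 ms until either a global or a bucket topology is available, or the deadline passes. Assuming the Mono's element type is ClusterTopology, as the capability checks further down in this change suggest, a blocking caller looks roughly like this:

    // Sketch; assumes a connected Core instance named `core`.
    ClusterTopology topology = core
        .waitForClusterTopology(Duration.ofSeconds(10))  // retried every 100 ms until a topology shows up
        .block();                                        // UnambiguousTimeoutException once the deadline is exceeded

    if (!topology.hasCapability(ClusterCapability.SEARCH_VECTOR)) {
      throw new FeatureNotAvailableException("This cluster does not support vector search.");
    }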
+ // See (internal to Couchbase) discussion here for full details: + // https://issues.couchbase.com/browse/CBSE-17070?focusedId=779820&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-779820 + // If this rule is not followed, then Micrometer will silently discard some metrics. Micrometer requires that + // every value output under a given metric has the same set of attributes. + + tags.put(TracingIdentifiers.ATTR_NAME, key.bucketName); + tags.put(TracingIdentifiers.ATTR_SCOPE, key.scopeName); + tags.put(TracingIdentifiers.ATTR_COLLECTION, key.collectionName); + + tags.put(TracingIdentifiers.ATTR_CLUSTER_UUID, key.clusterUuid); + tags.put(TracingIdentifiers.ATTR_CLUSTER_NAME, key.clusterName); if (finalExceptionSimpleName != null) { tags.put(TracingIdentifiers.ATTR_OUTCOME, finalExceptionSimpleName); @@ -626,24 +695,11 @@ protected Node createNode(final NodeIdentifier identifier) { */ private Mono maybeRemoveNode(final Node node, final ClusterConfig config) { return Mono.defer(() -> { - boolean stillPresentInBuckets = config - .bucketConfigs() - .values() - .stream() - .flatMap(bc -> bc.nodes().stream()) - .anyMatch(ni -> ni.identifier().equals(node.identifier())); - - - boolean stillPresentInGlobal; - if (config.globalConfig() != null) { - stillPresentInGlobal = config - .globalConfig() - .portInfos() - .stream() - .anyMatch(ni -> ni.identifier().equals(node.identifier())); - } else { - stillPresentInGlobal = false; - } + boolean stillPresentInBuckets = config.bucketTopologies().stream() + .anyMatch(topology -> hasNode(topology, node.identifier())); + + ClusterTopology globalTopology = config.globalTopology(); + boolean stillPresentInGlobal = globalTopology != null && hasNode(globalTopology, node.identifier()); if ((!stillPresentInBuckets && !stillPresentInGlobal) || !node.hasServicesEnabled()) { return node.disconnect().doOnTerminate(() -> nodes.remove(node)); @@ -653,6 +709,11 @@ private Mono maybeRemoveNode(final Node node, final ClusterConfig config) }); } + private static boolean hasNode(ClusterTopology topology, NodeIdentifier nodeId) { + return topology.nodes().stream() + .anyMatch(node -> node.id().equals(nodeId)); + } + /** * This method is used to remove a service from a node. 
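Aside (not part of the diff): the rule in the comment above is easy to restate. Every series recorded under one metric name must carry the same tag keys, so the keys are always put, with null standing in for "not applicable". The locals in this sketch are placeholders:

    // Always the same key set; values may be null and are defaulted or stripped by the meter backend.
    Map<String, String> tags = new HashMap<>();
    tags.put(TracingIdentifiers.ATTR_NAME, bucketName);           // placeholder local, may be null
    tags.put(TracingIdentifiers.ATTR_CLUSTER_UUID, clusterUuid);  // placeholder local, may be null
    tags.put(TracingIdentifiers.ATTR_CLUSTER_NAME, clusterName);  // placeholder local, may be null
    // Anti-pattern: only putting a key when the value is known, e.g.
    //   if (clusterUuid != null) tags.put(TracingIdentifiers.ATTR_CLUSTER_UUID, clusterUuid);
    // which yields differing key sets across series and lets the backend silently drop some of them.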
* @@ -688,7 +749,7 @@ public Mono shutdown(final Duration timeout) { invalidStateWatchdog.dispose(); return Flux - .fromIterable(currentConfig.bucketConfigs().keySet()) + .fromIterable(currentConfig.bucketNames()) .flatMap(this::closeBucket) .then(configurationProvider.shutdown()) .then(configurationProcessor.awaitTermination()) @@ -717,18 +778,18 @@ public Mono shutdown(final Duration timeout) { private void reconfigure(Runnable doFinally) { final ClusterConfig configForThisAttempt = currentConfig; - if (configForThisAttempt.bucketConfigs().isEmpty() && configForThisAttempt.globalConfig() == null) { + final ClusterTopology globalTopology = configForThisAttempt.globalTopology(); + final Collection bucketTopologies = configForThisAttempt.bucketTopologies(); + + if (bucketTopologies.isEmpty() && globalTopology == null) { reconfigureDisconnectAll(doFinally); return; } final NanoTimestamp start = NanoTimestamp.now(); - Flux bucketConfigFlux = Flux - .just(configForThisAttempt) - .flatMap(cc -> Flux.fromIterable(cc.bucketConfigs().values())); - reconfigureBuckets(bucketConfigFlux) - .then(reconfigureGlobal(configForThisAttempt.globalConfig())) + reconfigureBuckets(Flux.fromIterable(bucketTopologies)) + .then(reconfigureGlobal(globalTopology)) .then(Mono.defer(() -> Flux .fromIterable(new ArrayList<>(nodes)) @@ -781,115 +842,75 @@ private void reconfigureDisconnectAll(Runnable doFinally) { ); } - private Mono reconfigureGlobal(final GlobalConfig config) { - return Mono.defer(() -> { - if (config == null) { - return Mono.empty(); - } - - return Flux - .fromIterable(config.portInfos()) - .flatMap(ni -> { - boolean tls = coreContext.environment().securityConfig().tlsEnabled(); - - final Map services = tls ? ni.sslPorts() : ni.ports(); - - Flux serviceRemoveFlux = Flux - .fromArray(ServiceType.values()) - .filter(s -> !services.containsKey(s)) - .flatMap(s -> removeServiceFrom( - ni.identifier(), - s, - Optional.empty()) - .onErrorResume(throwable -> { - eventBus.publish(new ServiceReconfigurationFailedEvent( - coreContext, - ni.hostname(), - s, - throwable - )); - return Mono.empty(); - }) - ); - - - Flux serviceAddFlux = Flux - .fromIterable(services.entrySet()) - .flatMap(s -> ensureServiceAt( - ni.identifier(), - s.getKey(), - s.getValue(), - Optional.empty()) - .onErrorResume(throwable -> { - eventBus.publish(new ServiceReconfigurationFailedEvent( - coreContext, - ni.hostname(), - s.getKey(), - throwable - )); - return Mono.empty(); - }) - ); - - return Flux.merge(serviceAddFlux, serviceRemoveFlux); - }) - .then(); - }); + private Mono reconfigureGlobal(final @Nullable ClusterTopology topology) { + return topology == null + ? Mono.empty() + : reconfigureGlobalOrBucket(topology, null); } /** * Contains logic to perform reconfiguration for a bucket config. * - * @param bucketConfigs the flux of bucket configs currently open. + * @param bucketTopologies the flux of topologies from currently open buckets * @return a mono once reconfiguration for all buckets is complete */ - private Mono reconfigureBuckets(final Flux bucketConfigs) { - return bucketConfigs.flatMap(bc -> - Flux.fromIterable(bc.nodes()) - .flatMap(ni -> { - boolean tls = coreContext.environment().securityConfig().tlsEnabled(); - - final Map services = tls ? 
ni.sslServices() : ni.services(); - - Flux serviceRemoveFlux = Flux - .fromArray(ServiceType.values()) - .filter(s -> !services.containsKey(s)) - .flatMap(s -> removeServiceFrom( - ni.identifier(), + private Mono reconfigureBuckets(final Flux bucketTopologies) { + return bucketTopologies.flatMap(bc -> reconfigureGlobalOrBucket(bc, bc.bucket().name())) + .then(); + } + + /** + * @param bucketName pass non-null if using the topology to configure bucket-scoped services. + * + * @implNote Maybe in the future we can inspect the ClusterTopology to see if it has a BucketTopology, + * and get the bucket name from there. However, let's make it explicit for now; this leaves the door open + * to using a ClusterTopologyWithBucket to configure global services (by passing a null bucket name). + */ + private Mono reconfigureGlobalOrBucket( + ClusterTopology topology, + @Nullable String bucketName + ) { + return Flux.fromIterable(topology.nodes()) + .flatMap(ni -> { + Flux serviceRemoveFlux = Flux + .fromArray(ServiceType.values()) + .filter(s -> !ni.has(s)) + .flatMap(s -> removeServiceFrom( + ni.id(), s, - s.scope() == ServiceScope.BUCKET ? Optional.of(bc.name()) : Optional.empty()) + s.scope() == ServiceScope.BUCKET ? Optional.ofNullable(bucketName) : Optional.empty()) .onErrorResume(throwable -> { eventBus.publish(new ServiceReconfigurationFailedEvent( coreContext, - ni.hostname(), + ni.host(), s, throwable )); return Mono.empty(); }) - ); + ); - Flux serviceAddFlux = Flux - .fromIterable(services.entrySet()) - .flatMap(s -> ensureServiceAt( - ni.identifier(), + Flux serviceAddFlux = Flux + .fromIterable(ni.ports().entrySet()) + .flatMap(s -> ensureServiceAt( + ni.id(), s.getKey(), s.getValue(), - s.getKey().scope() == ServiceScope.BUCKET ? Optional.of(bc.name()) : Optional.empty()) + s.getKey().scope() == ServiceScope.BUCKET ? Optional.ofNullable(bucketName) : Optional.empty()) .onErrorResume(throwable -> { eventBus.publish(new ServiceReconfigurationFailedEvent( coreContext, - ni.hostname(), + ni.host(), s.getKey(), throwable )); return Mono.empty(); }) - ); + ); - return Flux.merge(serviceAddFlux, serviceRemoveFlux); - }) - ).then(); + return Flux.merge(serviceAddFlux, serviceRemoveFlux); + }) + .then(); } /** @@ -986,6 +1007,12 @@ public CoreEnvironment environment() { return context().environment(); } + @Stability.Internal + @Override + public CoreResources coreResources() { + return coreResources; + } + @Override public CompletableFuture waitUntilReady( Set serviceTypes, @@ -1005,8 +1032,10 @@ public static class ResponseMetricIdentifier { private final @Nullable String scopeName; private final @Nullable String collectionName; private final @Nullable String exceptionSimpleName; + private final @Nullable String clusterName; + private final @Nullable String clusterUuid; - ResponseMetricIdentifier(final Request request, @Nullable String exceptionSimpleName) { + ResponseMetricIdentifier(final Request request, @Nullable String exceptionSimpleName, @Nullable ClusterIdentifier clusterIdent) { this.exceptionSimpleName = exceptionSimpleName; if (request.serviceType() == null) { if (request instanceof CoreTransactionRequest) { @@ -1019,6 +1048,8 @@ public static class ResponseMetricIdentifier { this.serviceType = CbTracing.getTracingId(request.serviceType()); } this.requestName = request.name(); + this.clusterName = clusterIdent == null ? null : clusterIdent.clusterName(); + this.clusterUuid = clusterIdent == null ? 
null : clusterIdent.clusterUuid(); if (request instanceof KeyValueRequest) { KeyValueRequest kv = (KeyValueRequest) request; bucketName = request.bucket(); @@ -1053,6 +1084,8 @@ public ResponseMetricIdentifier(final String serviceType, final String requestNa this.scopeName = null; this.collectionName = null; this.exceptionSimpleName = null; + this.clusterName = null; + this.clusterUuid = null; } public String serviceType() { @@ -1073,12 +1106,14 @@ public boolean equals(Object o) { && Objects.equals(bucketName, that.bucketName) && Objects.equals(scopeName, that.scopeName) && Objects.equals(collectionName, that.collectionName) - && Objects.equals(exceptionSimpleName, that.exceptionSimpleName); + && Objects.equals(exceptionSimpleName, that.exceptionSimpleName) + && Objects.equals(clusterName, that.clusterName) + && Objects.equals(clusterUuid, that.clusterUuid); } @Override public int hashCode() { - return Objects.hash(serviceType, requestName, bucketName, scopeName, collectionName, exceptionSimpleName); + return Objects.hash(serviceType, requestName, bucketName, scopeName, collectionName, exceptionSimpleName, clusterName, clusterUuid); } } diff --git a/core-io/src/main/java/com/couchbase/client/core/CoreContext.java b/core-io/src/main/java/com/couchbase/client/core/CoreContext.java index 62654bf68..6c264a4c4 100644 --- a/core-io/src/main/java/com/couchbase/client/core/CoreContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/CoreContext.java @@ -81,6 +81,11 @@ public CoreEnvironment environment() { return env; } + @Stability.Internal + public CoreResources coreResources() { + return core.coreResources(); + } + /** * @deprecated Always return an empty optional. Alternate addresses * are now resolved immediately when parsing cluster topology. diff --git a/core-io/src/main/java/com/couchbase/client/core/CoreProtostellar.java b/core-io/src/main/java/com/couchbase/client/core/CoreProtostellar.java index 0c2bef921..b33babf8a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/CoreProtostellar.java +++ b/core-io/src/main/java/com/couchbase/client/core/CoreProtostellar.java @@ -83,7 +83,8 @@ public CoreProtostellar( final Authenticator authenticator, final ConnectionString connectionString ) { - this.ctx = new ProtostellarContext(env, authenticator); + CoreResources coreResources = () -> env.requestTracer(); + this.ctx = new ProtostellarContext(env, authenticator, coreResources); notNull(connectionString, "connectionString"); checkConnectionStringScheme(connectionString, ConnectionString.Scheme.COUCHBASE2); @@ -208,6 +209,11 @@ public CoreEnvironment environment() { return context().environment(); } + @Override + public CoreResources coreResources() { + return context().coreResources(); + } + @Override public CompletableFuture waitUntilReady( Set serviceTypes, diff --git a/core-io/src/main/java/com/couchbase/client/core/CoreResources.java b/core-io/src/main/java/com/couchbase/client/core/CoreResources.java new file mode 100644 index 000000000..bade79739 --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/CoreResources.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.couchbase.client.core; + +import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.CoreCouchbaseOps; +import com.couchbase.client.core.cnc.RequestTracer; + +/** + * Resources that are owned by a {@link CoreCouchbaseOps}. (E.g. either a {@link Core} or {@link CoreProtostellar}. + *

+ * It is explicitly not owned by a CoreEnvironment, which can be shared between multiple Cluster objects, and so is not suitable for any information + * tied to a CoreCouchbaseOps. + *

+ * Consider preferring adding new resources here rather than into the *Environment objects. + */ +@Stability.Internal +public interface CoreResources { + RequestTracer requestTracer(); +} diff --git a/core-io/src/main/java/com/couchbase/client/core/annotation/UsedBy.java b/core-io/src/main/java/com/couchbase/client/core/annotation/UsedBy.java index 153f49f15..4a879be47 100644 --- a/core-io/src/main/java/com/couchbase/client/core/annotation/UsedBy.java +++ b/core-io/src/main/java/com/couchbase/client/core/annotation/UsedBy.java @@ -33,6 +33,7 @@ Project value(); enum Project { - SPRING_DATA_COUCHBASE + SPRING_DATA_COUCHBASE, + QUARKUS_COUCHBASE } } diff --git a/core-io/src/main/java/com/couchbase/client/core/api/CoreCouchbaseOps.java b/core-io/src/main/java/com/couchbase/client/core/api/CoreCouchbaseOps.java index df349580b..f6097e69c 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/CoreCouchbaseOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/CoreCouchbaseOps.java @@ -19,6 +19,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.CoreKeyspace; import com.couchbase.client.core.CoreProtostellar; +import com.couchbase.client.core.CoreResources; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.api.kv.CoreKvBinaryOps; import com.couchbase.client.core.api.kv.CoreKvOps; @@ -67,6 +68,8 @@ public interface CoreCouchbaseOps { CoreEnvironment environment(); + CoreResources coreResources(); + CompletableFuture waitUntilReady( Set serviceTypes, Duration timeout, diff --git a/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreKvOps.java b/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreKvOps.java index afa4d703e..d720e9417 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreKvOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreKvOps.java @@ -328,23 +328,27 @@ default Mono subdocGetReactive( Flux subdocGetAllReplicasReactive( CoreCommonOptions common, String key, - List commands + List commands, + CoreReadPreference readPreference ); Mono subdocGetAnyReplicaReactive( CoreCommonOptions common, String key, - List commands + List commands, + CoreReadPreference readPreference ); Flux getAllReplicasReactive( CoreCommonOptions common, - String key + String key, + CoreReadPreference readPreference ); Mono getAnyReplicaReactive( CoreCommonOptions common, - String key + String key, + CoreReadPreference readPreference ); CoreAsyncResponse subdocMutateAsync( diff --git a/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreReadPreference.java b/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreReadPreference.java new file mode 100644 index 000000000..0ff983d50 --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/api/kv/CoreReadPreference.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.couchbase.client.core.api.kv; + +public enum CoreReadPreference { + NO_PREFERENCE, + PREFERRED_SERVER_GROUP +} diff --git a/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryOps.java b/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryOps.java index 9bb4965be..439916ea2 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryOps.java @@ -17,7 +17,7 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.api.kv.CoreAsyncResponse; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Mono; import reactor.util.annotation.Nullable; diff --git a/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryResult.java b/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryResult.java index 19a997d17..f98c25dcc 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/query/CoreQueryResult.java @@ -18,7 +18,7 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.msg.query.QueryChunkRow; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.util.annotation.Nullable; import java.util.List; diff --git a/core-io/src/main/java/com/couchbase/client/core/api/query/CoreReactiveQueryResult.java b/core-io/src/main/java/com/couchbase/client/core/api/query/CoreReactiveQueryResult.java index 6e149d3a0..793d7e041 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/query/CoreReactiveQueryResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/query/CoreReactiveQueryResult.java @@ -18,7 +18,7 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.msg.query.QueryChunkRow; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.util.annotation.Nullable; diff --git a/core-io/src/main/java/com/couchbase/client/core/api/search/ClassicCoreSearchOps.java b/core-io/src/main/java/com/couchbase/client/core/api/search/ClassicCoreSearchOps.java index b3418961a..fd711dc28 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/search/ClassicCoreSearchOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/search/ClassicCoreSearchOps.java @@ -17,6 +17,7 @@ package com.couchbase.client.core.api.search; import com.couchbase.client.core.Core; +import com.couchbase.client.core.CoreResources; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.api.kv.CoreAsyncResponse; import com.couchbase.client.core.api.manager.CoreBucketAndScope; @@ -158,6 +159,10 @@ private CoreEnvironment environment() { return core.context().environment(); } + private CoreResources coreResources() { + return core.context().coreResources(); + } + private ServerSearchRequest searchRequest(String indexName, CoreSearchQuery query, CoreSearchOptions opts) { notNullOrEmpty(indexName, "IndexName", () -> new ReducedSearchErrorContext(indexName, query)); Duration timeout = opts.commonOptions().timeout().orElse(environment().timeoutConfig().searchTimeout()); @@ -170,7 +175,7 @@ private ServerSearchRequest 
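Aside (not part of the diff): the CoreReadPreference enum above threads through the widened CoreKvOps replica-read signatures. A sketch of the intended call shape, where kvOps and opts are placeholder references to a CoreKvOps and a CoreCommonOptions:

    // Sketch; the document id and variable names are illustrative.
    Mono<CoreGetResult> anyReplica = kvOps.getAnyReplicaReactive(
        opts,
        "airline_10",
        CoreReadPreference.PREFERRED_SERVER_GROUP);  // restrict the read to the preferred server group

    Flux<CoreGetResult> allReplicas = kvOps.getAllReplicasReactive(
        opts,
        "airline_10",
        CoreReadPreference.NO_PREFERENCE);           // prior behaviour: no placement preference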
searchRequest(String indexName, CoreSearchQuery quer RetryStrategy retryStrategy = opts.commonOptions().retryStrategy().orElse(environment().retryStrategy()); - RequestSpan span = environment() + RequestSpan span = coreResources() .requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_SEARCH, opts.commonOptions().parentSpan().orElse(null)); ServerSearchRequest request = new ServerSearchRequest(timeout, core.context(), retryStrategy, core.context().authenticator(), indexName, bytes, span, scope); @@ -352,7 +357,7 @@ private ServerSearchRequest searchRequestV2(String indexName, CoreSearchRequest RetryStrategy retryStrategy = opts.commonOptions().retryStrategy().orElse(environment().retryStrategy()); - RequestSpan span = environment() + RequestSpan span = coreResources() .requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_SEARCH, opts.commonOptions().parentSpan().orElse(null)); ServerSearchRequest request = new ServerSearchRequest(timeout, core.context(), retryStrategy, core.context().authenticator(), indexName, bytes, span, scope); diff --git a/core-io/src/main/java/com/couchbase/client/core/api/search/util/SearchCapabilityCheck.java b/core-io/src/main/java/com/couchbase/client/core/api/search/util/SearchCapabilityCheck.java index 259867c8f..77731202c 100644 --- a/core-io/src/main/java/com/couchbase/client/core/api/search/util/SearchCapabilityCheck.java +++ b/core-io/src/main/java/com/couchbase/client/core/api/search/util/SearchCapabilityCheck.java @@ -17,10 +17,8 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; -import com.couchbase.client.core.config.ClusterCapabilities; import com.couchbase.client.core.error.FeatureNotAvailableException; -import com.couchbase.client.core.service.ServiceType; -import com.couchbase.client.core.util.ClusterCapabilitiesUtil; +import com.couchbase.client.core.topology.ClusterCapability; import java.time.Duration; import java.util.concurrent.CompletableFuture; @@ -31,25 +29,26 @@ private SearchCapabilityCheck() { } public static CompletableFuture scopedSearchIndexCapabilityCheck(Core core, Duration timeout) { - return ClusterCapabilitiesUtil.waitForClusterCapabilities(core, timeout) - .doOnNext(clusterCapabilities -> { - if (!clusterCapabilities.get(ServiceType.SEARCH).contains(ClusterCapabilities.SCOPED_SEARCH_INDEX)) { - throw new FeatureNotAvailableException("This method cannot be used with this cluster, as it does not support scoped search indexes. Please use a cluster fully upgraded to Couchbase Server 7.6 or above."); - } - }) - .then() - .toFuture(); + return requireCapability(core, timeout, ClusterCapability.SEARCH_SCOPED, + "This method cannot be used with this cluster, as it does not support scoped search indexes." + + " Please use a cluster fully upgraded to Couchbase Server 7.6 or above."); } public static CompletableFuture vectorSearchCapabilityCheck(Core core, Duration timeout) { - return ClusterCapabilitiesUtil.waitForClusterCapabilities(core, timeout) - .doOnNext(clusterCapabilities -> { - if (!clusterCapabilities.get(ServiceType.SEARCH).contains(ClusterCapabilities.VECTOR_SEARCH)) { - throw new FeatureNotAvailableException("This method cannot be used with this cluster, as it does not support vector search. Please use a cluster fully upgraded to Couchbase Server 7.6 or above."); - } - }) - .then() - .toFuture(); + return requireCapability(core, timeout, ClusterCapability.SEARCH_VECTOR, + "This method cannot be used with this cluster, as it does not support vector search." 
+ + " Please use a cluster fully upgraded to Couchbase Server 7.6 or above."); + } + + private static CompletableFuture requireCapability(Core core, Duration timeout, ClusterCapability capability, String message) { + return core.waitForClusterTopology(timeout) + .doOnNext(topology -> { + if (!topology.hasCapability(capability)) { + throw new FeatureNotAvailableException(message); + } + }) + .then() + .toFuture(); } } diff --git a/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvBinaryOps.java b/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvBinaryOps.java index 5003679cd..56e6ba31b 100644 --- a/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvBinaryOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvBinaryOps.java @@ -18,6 +18,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.CoreContext; import com.couchbase.client.core.CoreKeyspace; +import com.couchbase.client.core.CoreResources; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.api.kv.CoreAsyncResponse; import com.couchbase.client.core.api.kv.CoreCounterResult; @@ -92,7 +93,7 @@ private AppendRequest appendRequestClassic(final String id, final byte[] content CoreKvBinaryParamValidators.validateAppendPrependArgs(id, keyspace, options, content, cas, durability); Duration timeout = timeout(options, durability); RetryStrategy retryStrategy = options.retryStrategy().orElse(environment().retryStrategy()); - RequestSpan span = environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_APPEND, + RequestSpan span = coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_APPEND, options.parentSpan().orElse(null)); AppendRequest request = new AppendRequest(timeout, context(), collectionIdentifier(), retryStrategy, id, content, cas, durability.levelIfSynchronous(), span); @@ -126,7 +127,7 @@ private PrependRequest prependRequestClassic(final String id, final byte[] conte CoreKvBinaryParamValidators.validateAppendPrependArgs(id, keyspace, options, content, cas, durability); Duration timeout = timeout(options, durability); RetryStrategy retryStrategy = options.retryStrategy().orElse(environment().retryStrategy()); - RequestSpan span = environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_PREPEND, + RequestSpan span = coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_PREPEND, options.parentSpan().orElse(null)); PrependRequest request = new PrependRequest(timeout, context(), collectionIdentifier(), retryStrategy, id, content, cas, durability.levelIfSynchronous(), span); @@ -159,7 +160,7 @@ private IncrementRequest incrementRequestClassic(final String id, final CoreComm durability); Duration timeout = timeout(options, durability); RetryStrategy retryStrategy = options.retryStrategy().orElse(environment().retryStrategy()); - RequestSpan span = environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_INCREMENT, + RequestSpan span = coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_INCREMENT, options.parentSpan().orElse(null)); IncrementRequest request = new IncrementRequest(timeout, context(), collectionIdentifier(), retryStrategy, id, @@ -193,7 +194,7 @@ private DecrementRequest decrementRequestClassic(final String id, final CoreComm notNullOrEmpty(id, "Id", () -> ReducedKeyValueErrorContext.create(id, collectionIdentifier())); Duration timeout = 
timeout(opts, durability); RetryStrategy retryStrategy = opts.retryStrategy().orElse(environment().retryStrategy()); - RequestSpan span = environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_DECREMENT, + RequestSpan span = coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_DECREMENT, opts.parentSpan().orElse(null)); DecrementRequest request = new DecrementRequest(timeout, context(), collectionIdentifier(), retryStrategy, id, @@ -210,6 +211,10 @@ private CoreEnvironment environment() { return core.context().environment(); } + private CoreResources coreResources() { + return core.context().coreResources(); + } + private CollectionIdentifier collectionIdentifier() { return keyspace.toCollectionIdentifier(); } diff --git a/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvOps.java b/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvOps.java index 0b7d4a9ee..84d94a6bd 100644 --- a/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/classic/kv/ClassicCoreKvOps.java @@ -27,7 +27,6 @@ import com.couchbase.client.core.api.kv.CoreExpiry; import com.couchbase.client.core.api.kv.CoreGetResult; import com.couchbase.client.core.api.kv.CoreKvOps; -import com.couchbase.client.core.api.kv.CoreKvParamValidators; import com.couchbase.client.core.api.kv.CoreKvResponseMetadata; import com.couchbase.client.core.api.kv.CoreLookupInMacro; import com.couchbase.client.core.api.kv.CoreMutationResult; @@ -36,6 +35,7 @@ import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.api.kv.CoreSubdocMutateCommand; import com.couchbase.client.core.api.kv.CoreSubdocMutateResult; +import com.couchbase.client.core.api.kv.CoreReadPreference; import com.couchbase.client.core.classic.ClassicHelper; import com.couchbase.client.core.cnc.CbTracing; import com.couchbase.client.core.cnc.RequestSpan; @@ -49,7 +49,6 @@ import com.couchbase.client.core.error.DocumentNotFoundException; import com.couchbase.client.core.error.DocumentUnretrievableException; import com.couchbase.client.core.error.InvalidArgumentException; -import com.couchbase.client.core.error.context.ErrorContext; import com.couchbase.client.core.error.context.KeyValueErrorContext; import com.couchbase.client.core.error.context.ReducedKeyValueErrorContext; import com.couchbase.client.core.io.CollectionIdentifier; @@ -146,7 +145,7 @@ public ClassicCoreKvOps(Core core, CoreKeyspace keyspace) { this.defaultKvTimeout = ctx.environment().timeoutConfig().kvTimeout(); this.defaultKvDurableTimeout = ctx.environment().timeoutConfig().kvDurableTimeout(); this.defaultRetryStrategy = ctx.environment().retryStrategy(); - this.requestTracer = ctx.environment().requestTracer(); + this.requestTracer = ctx.coreResources().requestTracer(); this.keyspace = requireNonNull(keyspace); this.collectionIdentifier = keyspace.toCollectionIdentifier(); this.rangeScanOrchestrator = new RangeScanOrchestrator(core, collectionIdentifier); @@ -711,19 +710,12 @@ public CoreAsyncResponse subdocGetAsync( // This should be superfluous now - if the op failed then error() should be set - but leaving as a fail-safe. 
commonKvResponseCheck(req, res); }, - it -> new CoreSubdocGetResult( - keyspace, - key, - CoreKvResponseMetadata.from(it.flexibleExtras()), - Arrays.asList(it.values()), - it.cas(), - it.isDeleted() - ) + it -> it.toCore(keyspace, key) ); } @Override - public Flux getAllReplicasReactive(CoreCommonOptions common, String key) { + public Flux getAllReplicasReactive(CoreCommonOptions common, String key, CoreReadPreference readPreference) { validateGetAllReplicasParams(common, key); Duration timeout = timeout(common); @@ -736,7 +728,8 @@ public Flux getAllReplicasReactive(CoreCommonOptions common, Stri timeout, retryStrategy, common.clientContext(), - common.parentSpan().orElse(null) + common.parentSpan().orElse(null), + readPreference ).map(it -> new CoreGetResult( CoreKvResponseMetadata.from(it.getResponse().flexibleExtras()), keyspace, @@ -750,17 +743,17 @@ public Flux getAllReplicasReactive(CoreCommonOptions common, Stri } @Override - public Mono getAnyReplicaReactive(CoreCommonOptions common, String key) { + public Mono getAnyReplicaReactive(CoreCommonOptions common, String key, CoreReadPreference readPreference) { validateGetAnyReplicaParams(common, key); RequestSpan getAnySpan = span(common, TracingIdentifiers.SPAN_GET_ANY_REPLICA); - return getAllReplicasReactive(common.withParentSpan(getAnySpan), key) + return getAllReplicasReactive(common.withParentSpan(getAnySpan), key, readPreference) .next() .doFinally(signalType -> getAnySpan.end()); } @Override - public Flux subdocGetAllReplicasReactive(CoreCommonOptions common, String key, List commands) { + public Flux subdocGetAllReplicasReactive(CoreCommonOptions common, String key, List commands, CoreReadPreference readPreference) { validateSubdocGetAllParams(common, key, commands); Duration timeout = timeout(common); @@ -774,15 +767,16 @@ public Flux subdocGetAllReplicasReactive(CoreCommonOptions timeout, retryStrategy, common.clientContext(), - common.parentSpan().orElse(null) + common.parentSpan().orElse(null), + readPreference ); } @Override - public Mono subdocGetAnyReplicaReactive(CoreCommonOptions common, String key, List commands) { + public Mono subdocGetAnyReplicaReactive(CoreCommonOptions common, String key, List commands, CoreReadPreference readPreference) { validateSubdocGetAnyParams(common, key, commands); RequestSpan getAnySpan = span(common, TracingIdentifiers.SPAN_GET_ANY_REPLICA); - return subdocGetAllReplicasReactive(common.withParentSpan(getAnySpan), key, commands) + return subdocGetAllReplicasReactive(common.withParentSpan(getAnySpan), key, commands, readPreference) .next() .switchIfEmpty(Mono.error(new DocumentUnretrievableException(ReducedKeyValueErrorContext.create(key, collectionIdentifier)))) .doFinally(signalType -> getAnySpan.end()); diff --git a/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryOps.java b/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryOps.java index 97d24db34..406bc05af 100644 --- a/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryOps.java @@ -43,10 +43,10 @@ import com.couchbase.client.core.msg.kv.MutationToken; import com.couchbase.client.core.msg.query.QueryRequest; import com.couchbase.client.core.msg.query.QueryResponse; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryReason; import com.couchbase.client.core.retry.RetryStrategy; import 
com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.transaction.CoreTransactionsReactive; import com.couchbase.client.core.transaction.config.CoreSingleQueryTransactionOptions; import com.couchbase.client.core.transaction.config.CoreTransactionsConfig; @@ -197,7 +197,7 @@ private QueryRequest queryRequest(String statement, } byte[] queryBytes = query.toString().getBytes(StandardCharsets.UTF_8); - RequestSpan span = core.context().environment() + RequestSpan span = core.context().coreResources() .requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_QUERY, options.commonOptions().parentSpan().orElse(null)); @@ -219,7 +219,7 @@ private static Mono singleQueryTransactionBuffered(Core core, } CoreTransactionsReactive tri = configureTransactions(core, opts); - SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().environment().requestTracer(), null, + SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().coreResources().requestTracer(), null, null, TracingIdentifiers.SPAN_REQUEST_QUERY, opts.commonOptions().parentSpan().map(SpanWrapper::new).orElse(null)) .attribute(TracingIdentifiers.ATTR_STATEMENT, statement) .attribute(TracingIdentifiers.ATTR_TRANSACTION_SINGLE_QUERY, true); @@ -253,7 +253,7 @@ private Mono singleQueryTransactionReactive(String stat } CoreTransactionsReactive tri = configureTransactions(core, opts); - SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().environment().requestTracer(), null, + SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().coreResources().requestTracer(), null, null, TracingIdentifiers.SPAN_REQUEST_QUERY, opts.commonOptions().parentSpan().map(SpanWrapper::new).orElse(null)) .attribute(TracingIdentifiers.ATTR_STATEMENT, statement) .attribute(TracingIdentifiers.ATTR_TRANSACTION_SINGLE_QUERY, true); diff --git a/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryResult.java b/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryResult.java index 2bee6f096..57c040b84 100644 --- a/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreQueryResult.java @@ -22,7 +22,7 @@ import com.couchbase.client.core.msg.query.QueryChunkHeader; import com.couchbase.client.core.msg.query.QueryChunkRow; import com.couchbase.client.core.msg.query.QueryChunkTrailer; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import java.util.ArrayList; import java.util.List; diff --git a/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreReactiveQueryResult.java b/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreReactiveQueryResult.java index 364f62777..5bd93a607 100644 --- a/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreReactiveQueryResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/classic/query/ClassicCoreReactiveQueryResult.java @@ -21,7 +21,7 @@ import com.couchbase.client.core.api.query.CoreReactiveQueryResult; import com.couchbase.client.core.msg.query.QueryChunkRow; import com.couchbase.client.core.msg.query.QueryResponse; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import 
reactor.util.annotation.Nullable; diff --git a/core-io/src/main/java/com/couchbase/client/core/classic/query/PreparedStatementStrategy.java b/core-io/src/main/java/com/couchbase/client/core/classic/query/PreparedStatementStrategy.java index d30c9fd9a..92a5511a4 100644 --- a/core-io/src/main/java/com/couchbase/client/core/classic/query/PreparedStatementStrategy.java +++ b/core-io/src/main/java/com/couchbase/client/core/classic/query/PreparedStatementStrategy.java @@ -42,7 +42,7 @@ public PreparedStatementStrategy(Core core, int cacheSize) { } protected RequestTracer requestTracer() { - return core.context().environment().requestTracer(); + return core.context().coreResources().requestTracer(); } public abstract Mono execute(QueryRequest request); diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/CbTracing.java b/core-io/src/main/java/com/couchbase/client/core/cnc/CbTracing.java index 3bf9e14f3..9860aacfb 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/CbTracing.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/CbTracing.java @@ -65,7 +65,7 @@ public static boolean isInternalSpan(final RequestSpan span) { */ @UsedBy(SPRING_DATA_COUCHBASE) public static RequestSpan newSpan(CoreContext coreContext, String spanName, RequestSpan parent) { - return coreContext.environment().requestTracer().requestSpan(spanName, parent); + return coreContext.coreResources().requestTracer().requestSpan(spanName, parent); } /** diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/DefaultEventBus.java b/core-io/src/main/java/com/couchbase/client/core/cnc/DefaultEventBus.java index 75179814e..95ffbcdab 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/DefaultEventBus.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/DefaultEventBus.java @@ -16,10 +16,10 @@ package com.couchbase.client.core.cnc; -import com.couchbase.client.core.deps.org.jctools.queues.MpscArrayQueue; import com.couchbase.client.core.json.Mapper; import com.couchbase.client.core.util.CbCollections; import com.couchbase.client.core.util.NanoTimestamp; +import com.couchbase.client.core.util.NativeImageHelper; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.core.scheduler.Scheduler; @@ -137,7 +137,7 @@ public static DefaultEventBus create(final Scheduler scheduler) { } private DefaultEventBus(final Builder builder) { - eventQueue = new MpscArrayQueue<>(builder.queueCapacity); + eventQueue = NativeImageHelper.createMpscArrayQueue(builder.queueCapacity); scheduler = builder.scheduler; errorLogging = builder.errorLogging.orElse(null); threadName = builder.threadName; diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/OrphanReporter.java b/core-io/src/main/java/com/couchbase/client/core/cnc/OrphanReporter.java index e1f58439f..f83f3fd47 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/OrphanReporter.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/OrphanReporter.java @@ -20,7 +20,6 @@ import com.couchbase.client.core.cnc.events.tracing.OrphanRecordDroppedEvent; import com.couchbase.client.core.cnc.events.tracing.OrphanReporterFailureDetectedEvent; import com.couchbase.client.core.cnc.events.tracing.OrphansRecordedEvent; -import com.couchbase.client.core.deps.org.jctools.queues.MpscArrayQueue; import com.couchbase.client.core.env.OrphanReporterConfig; import com.couchbase.client.core.msg.Request; import com.couchbase.client.core.msg.UnmonitoredRequest; @@ -29,6 +28,7 @@ import 
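Aside (not part of the diff): the two hunks above show the pattern this change applies throughout the core. Request spans are created via CoreResources instead of the environment, so the tracer can be the cluster-identity-decorating wrapper built in Core's constructor. The call-site shape, with a placeholder parent span:

    // Sketch; `coreContext` is a CoreContext and `parentSpan` may be null.
    RequestSpan span = coreContext
        .coreResources()
        .requestTracer()   // RequestTracerDecorator when a non-internal tracer is configured
        .requestSpan(TracingIdentifiers.SPAN_REQUEST_QUERY, parentSpan);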
com.couchbase.client.core.service.ServiceType; import com.couchbase.client.core.util.HostAndPort; import com.couchbase.client.core.util.NanoTimestamp; +import com.couchbase.client.core.util.NativeImageHelper; import reactor.core.publisher.Mono; import java.time.Duration; @@ -86,7 +86,7 @@ public class OrphanReporter { @Stability.Internal public OrphanReporter(final EventBus eventBus, final OrphanReporterConfig config) { this.eventBus = eventBus; - this.orphanQueue = new MpscArrayQueue<>(config.queueLength()); + this.orphanQueue = NativeImageHelper.createMpscArrayQueue(config.queueLength()); this.emitInterval = config.emitInterval(); this.sampleSize = config.sampleSize(); this.enabled = config.enabled(); diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/TracingIdentifiers.java b/core-io/src/main/java/com/couchbase/client/core/cnc/TracingIdentifiers.java index 5200bf637..af42c3c78 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/TracingIdentifiers.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/TracingIdentifiers.java @@ -264,6 +264,9 @@ private TracingIdentifiers() {} public static final String ATTR_SCOPE = "db.couchbase.scope"; public static final String ATTR_DOCUMENT_ID = "db.couchbase.document_id"; + public static final String ATTR_CLUSTER_UUID = "db.couchbase.cluster_uuid"; + public static final String ATTR_CLUSTER_NAME = "db.couchbase.cluster_name"; + public static final String ATTR_TRANSACTION_ID = "db.couchbase.transaction.id"; public static final String ATTR_TRANSACTION_ATTEMPT_ID = "db.couchbase.transaction.attempt_id"; public static final String ATTR_TRANSACTION_STATE = "db.couchbase.transaction.state"; @@ -291,6 +294,7 @@ private TracingIdentifiers() {} public static final String TRANSACTION_OP_INSERT = "transaction_insert"; public static final String TRANSACTION_OP_REMOVE = "transaction_remove"; public static final String TRANSACTION_OP_GET = "transaction_get"; + public static final String TRANSACTION_OP_GET_REPLICA_FROM_PREFERRED_SERVER_GROUP = "transaction_get_replica_from_preferred_server_group"; public static final String TRANSACTION_OP_QUERY = "transaction_query"; public static final String TRANSACTION_OP_INSERT_STAGE = "transaction_insert_stage"; public static final String TRANSACTION_OP_REPLACE_STAGE = "transaction_replace_stage"; diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/LoggingMeter.java b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/LoggingMeter.java index a0dae3796..0faeae5c1 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/LoggingMeter.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/LoggingMeter.java @@ -32,6 +32,7 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -47,7 +48,7 @@ public class LoggingMeter implements Meter { private final Thread worker; private final AtomicBoolean running = new AtomicBoolean(false); - private final Map valueRecorders = new ConcurrentHashMap<>(); + private final ConcurrentMap valueRecorders = new ConcurrentHashMap<>(); private final long emitIntervalMs; private final LoggingMeterConfig config; @@ -86,7 +87,7 @@ public Counter counter(String name, Map tags) { } @Override - public synchronized ValueRecorder valueRecorder(String name, Map tags) { + public ValueRecorder 
valueRecorder(String name, Map tags) { try { return valueRecorders.computeIfAbsent( new NameAndTags(name, tags), @@ -141,7 +142,7 @@ public void run() { } @SuppressWarnings("unchecked") - private synchronized void dumpMetrics() { + private void dumpMetrics() { Map output = new HashMap<>(); Map meta = new HashMap<>(); diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopCounter.java b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopCounter.java index 53d124400..171ed0904 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopCounter.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopCounter.java @@ -20,6 +20,8 @@ public class NoopCounter implements Counter { + public static final NoopCounter INSTANCE = new NoopCounter(); + @Override public void incrementBy(long number) { diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopMeter.java b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopMeter.java index e9b3de268..ce3b35271 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopMeter.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopMeter.java @@ -30,12 +30,12 @@ private NoopMeter() {} @Override public Counter counter(String name, Map tags) { - return new NoopCounter(); + return NoopCounter.INSTANCE; } @Override public ValueRecorder valueRecorder(String name, Map tags) { - return new NoopValueRecorder(); + return NoopValueRecorder.INSTANCE; } @Override diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopValueRecorder.java b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopValueRecorder.java index 4cc068a03..5bc30f433 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopValueRecorder.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/metrics/NoopValueRecorder.java @@ -20,6 +20,8 @@ public class NoopValueRecorder implements ValueRecorder { + public static final NoopValueRecorder INSTANCE = new NoopValueRecorder(); + @Override public void recordValue(long value) { diff --git a/core-io/src/main/java/com/couchbase/client/core/cnc/tracing/ThresholdLoggingTracer.java b/core-io/src/main/java/com/couchbase/client/core/cnc/tracing/ThresholdLoggingTracer.java index 825c519af..d2e433daa 100644 --- a/core-io/src/main/java/com/couchbase/client/core/cnc/tracing/ThresholdLoggingTracer.java +++ b/core-io/src/main/java/com/couchbase/client/core/cnc/tracing/ThresholdLoggingTracer.java @@ -21,7 +21,6 @@ import com.couchbase.client.core.cnc.RequestTracer; import com.couchbase.client.core.cnc.TracingIdentifiers; import com.couchbase.client.core.cnc.events.tracing.OverThresholdRequestsRecordedEvent; -import com.couchbase.client.core.deps.org.jctools.queues.MpscArrayQueue; import com.couchbase.client.core.env.ThresholdLoggingTracerConfig; import com.couchbase.client.core.error.TracerException; import com.couchbase.client.core.msg.Request; @@ -29,6 +28,7 @@ import com.couchbase.client.core.transaction.components.CoreTransactionRequest; import com.couchbase.client.core.util.HostAndPort; import com.couchbase.client.core.util.NanoTimestamp; +import com.couchbase.client.core.util.NativeImageHelper; import reactor.core.publisher.Mono; import java.time.Duration; @@ -121,7 +121,7 @@ public static ThresholdLoggingTracer create(final EventBus eventBus, ThresholdLo */ private ThresholdLoggingTracer(final EventBus eventBus, ThresholdLoggingTracerConfig config) { this.eventBus = 
eventBus; - this.overThresholdQueue = new MpscArrayQueue<>(config.queueLength()); + this.overThresholdQueue = NativeImageHelper.createMpscArrayQueue(config.queueLength()); kvThreshold = config.kvThreshold().toNanos(); analyticsThreshold = config.analyticsThreshold().toNanos(); searchThreshold = config.searchThreshold().toNanos(); diff --git a/core-io/src/main/java/com/couchbase/client/core/config/AbstractBucketConfig.java b/core-io/src/main/java/com/couchbase/client/core/config/AbstractBucketConfig.java index 85096f1ec..55e7c34ed 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/AbstractBucketConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/AbstractBucketConfig.java @@ -16,7 +16,10 @@ package com.couchbase.client.core.config; +import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.ClusterTopologyWithBucket; +import reactor.util.annotation.Nullable; import java.util.ArrayList; import java.util.Collections; @@ -29,6 +32,7 @@ import java.util.stream.Collectors; import static com.couchbase.client.core.util.CbCollections.isNullOrEmpty; +import static java.util.Objects.requireNonNull; @Deprecated public abstract class AbstractBucketConfig implements BucketConfig { @@ -45,6 +49,9 @@ public abstract class AbstractBucketConfig implements BucketConfig { private final List portInfos; private final ConfigVersion version; + // Null only if this bucket config was created by a legacy config parser + @Nullable private final ClusterTopologyWithBucket clusterTopology; + /** * A "dumb" constructor that assigns the given values * directly to the corresponding fields, without any funny business. @@ -60,7 +67,8 @@ protected AbstractBucketConfig( Map> clusterCapabilities, String origin, List portInfos, - ConfigVersion version + ConfigVersion version, + ClusterTopologyWithBucket clusterTopology ) { this.uuid = uuid; this.name = name; @@ -73,6 +81,7 @@ protected AbstractBucketConfig( this.origin = origin; this.portInfos = portInfos; this.version = version; + this.clusterTopology = requireNonNull(clusterTopology); } protected AbstractBucketConfig(String uuid, String name, BucketNodeLocator locator, String uri, String streamingUri, @@ -92,6 +101,15 @@ protected AbstractBucketConfig(String uuid, String name, BucketNodeLocator locat this.version = new ConfigVersion(revEpoch, rev); this.portInfos = portInfos == null ? Collections.emptyList() : portInfos; this.nodeInfo = portInfos == null ? 
nodeInfos : nodeInfoFromExtended(portInfos, nodeInfos); + this.clusterTopology = null; + } + + @Stability.Internal + public ClusterTopologyWithBucket asClusterTopology() { + if (clusterTopology == null) { + throw new IllegalStateException("This BucketConfig instance was not created from a ClusterTopologyWithBucket."); + } + return clusterTopology; } static Set convertBucketCapabilities(final List input) { diff --git a/core-io/src/main/java/com/couchbase/client/core/config/BucketConfig.java b/core-io/src/main/java/com/couchbase/client/core/config/BucketConfig.java index 89b1210b9..a3e1e7d9b 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/BucketConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/BucketConfig.java @@ -16,9 +16,11 @@ package com.couchbase.client.core.config; +import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.service.ServiceType; import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JsonSubTypes; import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.couchbase.client.core.topology.ClusterTopologyWithBucket; import java.util.List; import java.util.Map; @@ -163,4 +165,9 @@ public interface BucketConfig { */ List portInfos(); + /** + * @throws IllegalStateException if this BucketConfig was not created from a ClusterTopologyWithBucket. + */ + @Stability.Internal + ClusterTopologyWithBucket asClusterTopology(); } diff --git a/core-io/src/main/java/com/couchbase/client/core/config/ClusterCapabilities.java b/core-io/src/main/java/com/couchbase/client/core/config/ClusterCapabilities.java index d183d93bd..258cde681 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/ClusterCapabilities.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/ClusterCapabilities.java @@ -22,7 +22,7 @@ * Contains all the cluster capabilities this SDK supports (depending on the server version, the cluster may * export more than these). * - * @deprecated In favor of {@link com.couchbase.client.core.topology.ClusterCapability + * @deprecated In favor of {@link com.couchbase.client.core.topology.ClusterCapability} */ @Deprecated public enum ClusterCapabilities { diff --git a/core-io/src/main/java/com/couchbase/client/core/config/ClusterConfig.java b/core-io/src/main/java/com/couchbase/client/core/config/ClusterConfig.java index abf1d140d..71fb38e88 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/ClusterConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/ClusterConfig.java @@ -18,7 +18,11 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.ClusterTopology; +import com.couchbase.client.core.topology.ClusterTopologyWithBucket; +import reactor.util.annotation.Nullable; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Map; @@ -26,6 +30,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; +import static com.couchbase.client.core.util.CbCollections.transform; + /** * The {@link ClusterConfig} holds bucket and global configurations in a central place. 
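// Editor's illustrative sketch, not part of this patch: bridging from the deprecated BucketConfig to the
// new topology model via the asClusterTopology() accessor introduced above. The clusterConfig variable and
// the bucket name are hypothetical; per the javadoc above, asClusterTopology() throws IllegalStateException
// when the config was produced by a legacy config parser.
BucketConfig legacy = clusterConfig.bucketConfig("travel-sample");
if (legacy != null) {
  try {
    ClusterTopologyWithBucket topology = legacy.asClusterTopology();
    // work against the modern model, e.g. topology.nodes()
  } catch (IllegalStateException e) {
    // config came from a legacy parser; keep using the deprecated BucketConfig accessors
  }
}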
*/ @@ -49,6 +55,28 @@ public ClusterConfig() { globalConfig = new AtomicReference<>(); } + @Stability.Internal + public Set bucketNames() { + return bucketConfigs.keySet(); + } + + @Stability.Internal + public @Nullable ClusterTopologyWithBucket bucketTopology(final String bucketName) { + BucketConfig bucketConfig = bucketConfigs.get(bucketName); + return bucketConfig == null ? null : bucketConfig.asClusterTopology(); + } + + @Stability.Internal + public Collection bucketTopologies() { + return transform(bucketConfigs.values(), BucketConfig::asClusterTopology); + } + + @Stability.Internal + public @Nullable ClusterTopology globalTopology() { + GlobalConfig g = globalConfig(); + return g == null ? null : g.asClusterTopology(); + } + public BucketConfig bucketConfig(final String bucketName) { return bucketConfigs.get(bucketName); } diff --git a/core-io/src/main/java/com/couchbase/client/core/config/ConfigVersion.java b/core-io/src/main/java/com/couchbase/client/core/config/ConfigVersion.java index 7d648313b..beb70bde7 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/ConfigVersion.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/ConfigVersion.java @@ -23,7 +23,7 @@ import java.util.Objects; /** - * @deprecated In favor of {@link com.couchbase.client.core.topology.TopologyRevision + * @deprecated In favor of {@link com.couchbase.client.core.topology.TopologyRevision} */ @Deprecated @Stability.Internal diff --git a/core-io/src/main/java/com/couchbase/client/core/config/CouchbaseBucketConfig.java b/core-io/src/main/java/com/couchbase/client/core/config/CouchbaseBucketConfig.java index 529873d81..8366ce9d5 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/CouchbaseBucketConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/CouchbaseBucketConfig.java @@ -131,7 +131,8 @@ public CouchbaseBucketConfig(ClusterTopologyWithBucket cluster) { getClusterCapabilities(cluster), "", getPortInfos(cluster), - LegacyConfigHelper.toLegacy(cluster.revision()) + LegacyConfigHelper.toLegacy(cluster.revision()), + cluster ); CouchbaseBucketTopology bucket = (CouchbaseBucketTopology) cluster.bucket(); @@ -164,7 +165,7 @@ private static List toLegacyPartitions( return transform(map.values(), it -> { short[] replicas = new short[numberOfReplicas]; for (int i = 0; i < numberOfReplicas; i++) { - replicas[i] = (short) it.nodeIndexForReplica(0).orElse(PARTITION_NOT_EXISTENT); + replicas[i] = (short) it.nodeIndexForReplica(i).orElse(PARTITION_NOT_EXISTENT); } return new Partition( (short) it.nodeIndexForActive().orElse(PARTITION_NOT_EXISTENT), diff --git a/core-io/src/main/java/com/couchbase/client/core/config/DefaultConfigurationProvider.java b/core-io/src/main/java/com/couchbase/client/core/config/DefaultConfigurationProvider.java index ba71cfb8d..a772a80f1 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/DefaultConfigurationProvider.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/DefaultConfigurationProvider.java @@ -58,7 +58,7 @@ import com.couchbase.client.core.msg.CancellationReason; import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.GetCollectionIdRequest; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.retry.BestEffortRetryStrategy; import com.couchbase.client.core.service.ServiceType; import com.couchbase.client.core.topology.ClusterTopology; diff --git 
a/core-io/src/main/java/com/couchbase/client/core/config/GlobalConfig.java b/core-io/src/main/java/com/couchbase/client/core/config/GlobalConfig.java index d12339942..a55a79ddc 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/GlobalConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/GlobalConfig.java @@ -16,12 +16,15 @@ package com.couchbase.client.core.config; +import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JacksonInject; import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JsonCreator; import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JsonProperty; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.ClusterIdentifier; import com.couchbase.client.core.topology.ClusterTopology; +import reactor.util.annotation.Nullable; import java.util.ArrayList; import java.util.List; @@ -45,11 +48,17 @@ public class GlobalConfig { private final ConfigVersion version; private final List portInfos; private final Map> clusterCapabilities; + private final @Nullable ClusterIdentifier clusterIdent; + + // Null only if the GlobalConfig was created by a legacy config parser + @Nullable private final ClusterTopology clusterTopology; public GlobalConfig(ClusterTopology topology) { this.version = LegacyConfigHelper.toLegacy(topology.revision()); this.portInfos = LegacyConfigHelper.getPortInfos(topology); this.clusterCapabilities = LegacyConfigHelper.getClusterCapabilities(topology); + this.clusterTopology = topology; + this.clusterIdent = topology.id(); } @JsonCreator @@ -63,9 +72,10 @@ public GlobalConfig( this.version = new ConfigVersion(revEpoch, rev); this.portInfos = enrichPortInfos(portInfos, origin); this.clusterCapabilities = AbstractBucketConfig.convertClusterCapabilities(clusterCapabilities); + this.clusterTopology = null; + this.clusterIdent = null; } - /** * Helper method to enrich the port infos with a synthetic origin host if not present. * @@ -81,7 +91,7 @@ private List enrichPortInfos(final List portInfos, final Str List enriched = new ArrayList<>(portInfos.size()); for (PortInfo portInfo : portInfos) { if (portInfo.hostname() == null) { - enriched.add(new PortInfo(portInfo.ports(), portInfo.sslPorts(), portInfo.alternateAddresses(), origin)); + enriched.add(new PortInfo(portInfo.ports(), portInfo.sslPorts(), portInfo.alternateAddresses(), origin, portInfo.serverGroup())); } else { enriched.add(portInfo); } @@ -120,6 +130,10 @@ public Map> clusterCapabilities() { return clusterCapabilities; } + @Nullable public ClusterIdentifier clusterIdent() { + return clusterIdent; + } + /** * The node/port infos for each node in the list. */ @@ -127,6 +141,17 @@ public List portInfos() { return portInfos; } + /** + * @throws IllegalStateException if this GlobalConfig was not created from a ClusterTopology. 
+ */ + @Stability.Internal + ClusterTopology asClusterTopology() { + if (clusterTopology == null) { + throw new IllegalStateException("This GlobalConfig instance was not created from a ClusterTopology."); + } + return clusterTopology; + } + @Override public String toString() { return "GlobalConfig{" + diff --git a/core-io/src/main/java/com/couchbase/client/core/config/LegacyConfigHelper.java b/core-io/src/main/java/com/couchbase/client/core/config/LegacyConfigHelper.java index 9755e9251..160c32f7d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/LegacyConfigHelper.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/LegacyConfigHelper.java @@ -53,7 +53,8 @@ static List getPortInfos(ClusterTopology topology) { tlsPorts(topology, it), emptyMap(), // The host is always accurate -- there is never an alternate. it.host(), - it.id().toLegacy() + it.id().toLegacy(), + it.serverGroup() ) ); } diff --git a/core-io/src/main/java/com/couchbase/client/core/config/MemcachedBucketConfig.java b/core-io/src/main/java/com/couchbase/client/core/config/MemcachedBucketConfig.java index 904664b35..df43eafb0 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/MemcachedBucketConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/MemcachedBucketConfig.java @@ -91,7 +91,8 @@ public MemcachedBucketConfig( LegacyConfigHelper.getClusterCapabilities(cluster), "", LegacyConfigHelper.getPortInfos(cluster), - LegacyConfigHelper.toLegacy(cluster.revision()) + LegacyConfigHelper.toLegacy(cluster.revision()), + cluster ); this.ketamaRing = KetamaRing.create( diff --git a/core-io/src/main/java/com/couchbase/client/core/config/NodeInfo.java b/core-io/src/main/java/com/couchbase/client/core/config/NodeInfo.java index e2c0c69f1..3f30e739f 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/NodeInfo.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/NodeInfo.java @@ -157,10 +157,18 @@ public String hostname() { return hostname; } + /** + * @deprecated In favor of {@link #id()} + */ + @Deprecated public NodeIdentifier identifier() { return nodeIdentifier; } + public com.couchbase.client.core.topology.NodeIdentifier id() { + return nodeIdentifier.asTopologyNodeIdentifier(); + } + public Map services() { return directServices; } diff --git a/core-io/src/main/java/com/couchbase/client/core/config/PartitionInfo.java b/core-io/src/main/java/com/couchbase/client/core/config/PartitionInfo.java index 5c6a5f3a8..8cb87778d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/PartitionInfo.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/PartitionInfo.java @@ -30,7 +30,7 @@ * Represents the partition information for a bucket. 
* * @since 1.1.0 - * @deprecated In favor of {@link com.couchbase.client.core.topology.PartitionMap + * @deprecated In favor of {@link com.couchbase.client.core.topology.PartitionMap} */ @Deprecated @JsonIgnoreProperties(ignoreUnknown = true) diff --git a/core-io/src/main/java/com/couchbase/client/core/config/PortInfo.java b/core-io/src/main/java/com/couchbase/client/core/config/PortInfo.java index 2c6f816c0..d5acd6297 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/PortInfo.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/PortInfo.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.deps.com.fasterxml.jackson.annotation.JsonProperty; import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.service.ServiceType; +import reactor.util.annotation.Nullable; import java.util.Collections; import java.util.HashMap; @@ -30,7 +31,7 @@ import static java.util.Objects.requireNonNull; /** - * @deprecated In favor of {@link com.couchbase.client.core.topology.HostAndServicePorts + * @deprecated In favor of {@link com.couchbase.client.core.topology.HostAndServicePorts} */ @Deprecated @JsonIgnoreProperties(ignoreUnknown = true) @@ -40,6 +41,7 @@ public class PortInfo { private final Map sslPorts; private final Map alternateAddresses; private final String hostname; + private final @Nullable String serverGroup; private final NodeIdentifier nodeIdentifier; /** @@ -54,12 +56,14 @@ public class PortInfo { public PortInfo( @JsonProperty("services") Map services, @JsonProperty("hostname") String hostname, - @JsonProperty("alternateAddresses") Map aa + @JsonProperty("alternateAddresses") Map aa, + @JsonProperty("serverGroup") String serverGroup ) { ports = new HashMap<>(); sslPorts = new HashMap<>(); alternateAddresses = aa == null ? Collections.emptyMap() : aa; this.hostname = hostname; // might be null when decoded from JSON, covered at a higher level + this.serverGroup = serverGroup; extractPorts(services, ports, sslPorts); @@ -75,11 +79,12 @@ public PortInfo( * @param hostname the hostname of the port info (node). 
*/ PortInfo(final Map ports, final Map sslPorts, - final Map alternateAddresses, final String hostname) { + final Map alternateAddresses, final String hostname, final @Nullable String serverGroup) { this.ports = requireNonNull(ports); this.sslPorts = requireNonNull(sslPorts); this.alternateAddresses = requireNonNull(alternateAddresses); this.hostname = requireNonNull(hostname); + this.serverGroup = serverGroup; this.nodeIdentifier = initNodeIdentifier(hostname, ports, sslPorts); } @@ -88,17 +93,27 @@ public PortInfo( final Map sslPorts, final Map alternateAddresses, final String hostname, - final NodeIdentifier nodeIdentifier + final NodeIdentifier nodeIdentifier, + final @Nullable String serverGroup ) { this.ports = requireNonNull(ports); this.sslPorts = requireNonNull(sslPorts); this.alternateAddresses = requireNonNull(alternateAddresses); this.hostname = requireNonNull(hostname); this.nodeIdentifier = requireNonNull(nodeIdentifier); + this.serverGroup = serverGroup; } + /** + * @deprecated In favor of {@link #id()} + */ + @Deprecated public NodeIdentifier identifier() { - return nodeIdentifier; + return nodeIdentifier; + } + + public com.couchbase.client.core.topology.NodeIdentifier id() { + return nodeIdentifier.asTopologyNodeIdentifier(); } /** @@ -183,6 +198,11 @@ public Map alternateAddresses() { return alternateAddresses; } + @Nullable + public String serverGroup() { + return serverGroup; + } + @Override public String toString() { return "PortInfo{" @@ -190,6 +210,7 @@ public String toString() { + ", sslPorts=" + sslPorts + ", hostname='" + hostname + ", alternateAddresses=" + alternateAddresses + + ", serverGroup=" + serverGroup + '\'' + '}'; } diff --git a/core-io/src/main/java/com/couchbase/client/core/config/loader/BaseBucketLoader.java b/core-io/src/main/java/com/couchbase/client/core/config/loader/BaseBucketLoader.java index eb14ad0c3..5e4333b71 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/loader/BaseBucketLoader.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/loader/BaseBucketLoader.java @@ -20,9 +20,9 @@ import com.couchbase.client.core.config.ProposedBucketConfigContext; import com.couchbase.client.core.error.ConfigException; import com.couchbase.client.core.error.SeedNodeOutdatedException; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.service.ServiceState; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -82,7 +82,7 @@ public abstract class BaseBucketLoader implements BucketLoader { * all non-config exceptions into config exceptions so that the upper level only needs to handle * one specific exception type.
* - * <p>At this point, we are passing an {@link Optional#empty()} for alternate addresses when the + * <p>At this point, we are passing a null for server group info when the * service is created, since we do not have a config to check against at this point. The config provider * will take care of this at a later point in time, before the rest of the bootstrap happens.</p>
* @@ -98,8 +98,8 @@ public Mono load(final NodeIdentifier seed, final i .then(ensureServiceConnected(seed, serviceType, Optional.of(bucket))) .then(discoverConfig(seed, bucket)) .map(config -> new String(config, UTF_8)) - .map(config -> config.replace("$HOST", seed.address())) - .map(config -> new ProposedBucketConfigContext(bucket, config, seed.address())) + .map(config -> config.replace("$HOST", seed.hostForNetworkConnections())) + .map(config -> new ProposedBucketConfigContext(bucket, config, seed.hostForNetworkConnections())) .onErrorResume(ex -> Mono.error(ex instanceof ConfigException ? ex : new ConfigException("Caught exception while loading config.", ex) diff --git a/core-io/src/main/java/com/couchbase/client/core/config/loader/BucketLoader.java b/core-io/src/main/java/com/couchbase/client/core/config/loader/BucketLoader.java index ffd3aa617..a0006ec97 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/loader/BucketLoader.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/loader/BucketLoader.java @@ -17,7 +17,7 @@ package com.couchbase.client.core.config.loader; import com.couchbase.client.core.config.ProposedBucketConfigContext; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Mono; /** diff --git a/core-io/src/main/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoader.java b/core-io/src/main/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoader.java index d1d2b96ca..83f8f4f41 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoader.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoader.java @@ -23,9 +23,9 @@ import com.couchbase.client.core.error.ConfigException; import com.couchbase.client.core.error.NoAccessDuringConfigLoadException; import com.couchbase.client.core.msg.manager.BucketConfigRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.BestEffortRetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Mono; import static com.couchbase.client.core.logging.RedactableArgument.redactMeta; diff --git a/core-io/src/main/java/com/couchbase/client/core/config/loader/GlobalLoader.java b/core-io/src/main/java/com/couchbase/client/core/config/loader/GlobalLoader.java index f466892f7..144d4bce2 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/loader/GlobalLoader.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/loader/GlobalLoader.java @@ -26,9 +26,9 @@ import com.couchbase.client.core.error.UnsupportedConfigMechanismException; import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.CarrierGlobalConfigRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.BestEffortRetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Mono; import java.util.Optional; @@ -53,9 +53,9 @@ public GlobalLoader(final Core core) { /** * Tries to load the global configuration. * - *
<p>Please note that at this point, we are passing an {@link Optional#empty()} for alternate addresses when the + * <p>Please note that at this point, we are passing a null for server group info when the * service is created, since we do not have a config to check against at this point. The config provider - will take care of this at a later point in time, before the rest of the bootstrap happens.</p>
+ * will take care of this at a later point in time, before the rest of the bootstrap happens. * * @param seed the seed node to load from. * @param port the port number for the KV service. @@ -66,8 +66,8 @@ public Mono load(final NodeIdentifier seed, final i .ensureServiceAt(seed, ServiceType.KV, port, Optional.empty()) .then(discoverConfig(seed)) .map(config -> new String(config, UTF_8)) - .map(config -> config.replace("$HOST", seed.address())) - .map(config -> new ProposedGlobalConfigContext(config, seed.address())) + .map(config -> config.replace("$HOST", seed.hostForNetworkConnections())) + .map(config -> new ProposedGlobalConfigContext(config, seed.hostForNetworkConnections())) .onErrorResume(ex -> Mono.error(ex instanceof ConfigException ? ex : new ConfigException("Caught exception while loading global config.", ex) diff --git a/core-io/src/main/java/com/couchbase/client/core/config/loader/KeyValueBucketLoader.java b/core-io/src/main/java/com/couchbase/client/core/config/loader/KeyValueBucketLoader.java index f35f8ef9b..cd4b1ee1a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/loader/KeyValueBucketLoader.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/loader/KeyValueBucketLoader.java @@ -25,9 +25,9 @@ import com.couchbase.client.core.io.CollectionIdentifier; import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.CarrierBucketConfigRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.BestEffortRetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Mono; import java.util.Optional; diff --git a/core-io/src/main/java/com/couchbase/client/core/config/refresher/GlobalRefresher.java b/core-io/src/main/java/com/couchbase/client/core/config/refresher/GlobalRefresher.java index 8d05dc0bb..6b4a7f769 100644 --- a/core-io/src/main/java/com/couchbase/client/core/config/refresher/GlobalRefresher.java +++ b/core-io/src/main/java/com/couchbase/client/core/config/refresher/GlobalRefresher.java @@ -163,7 +163,7 @@ private Flux attemptUpdateGlobalConfig(final Flux

fetchConfigPerNode(final String name, ctx, new CollectionIdentifier(name, Optional.empty(), Optional.empty()), FailFastRetryStrategy.INSTANCE, - nodeInfo.identifier(), + nodeInfo.id(), currentVersion(name) ); core.send(request); diff --git a/core-io/src/main/java/com/couchbase/client/core/diagnostics/HealthPinger.java b/core-io/src/main/java/com/couchbase/client/core/diagnostics/HealthPinger.java index 7ec808b5f..d7ed88287 100644 --- a/core-io/src/main/java/com/couchbase/client/core/diagnostics/HealthPinger.java +++ b/core-io/src/main/java/com/couchbase/client/core/diagnostics/HealthPinger.java @@ -19,11 +19,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.Reactor; import com.couchbase.client.core.annotation.Stability; -import com.couchbase.client.core.config.BucketConfig; import com.couchbase.client.core.config.ClusterConfig; -import com.couchbase.client.core.config.GlobalConfig; -import com.couchbase.client.core.config.NodeInfo; -import com.couchbase.client.core.config.PortInfo; import com.couchbase.client.core.endpoint.http.CoreCommonOptions; import com.couchbase.client.core.endpoint.http.CoreHttpRequest; import com.couchbase.client.core.error.TimeoutException; @@ -32,22 +28,25 @@ import com.couchbase.client.core.msg.RequestTarget; import com.couchbase.client.core.msg.kv.KvPingRequest; import com.couchbase.client.core.msg.kv.KvPingResponse; -import com.couchbase.client.core.node.NodeIdentifier; -import com.couchbase.client.core.protostellar.CoreProtostellarUtil; import com.couchbase.client.core.retry.RetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.BucketCapability; +import com.couchbase.client.core.topology.ClusterTopology; +import com.couchbase.client.core.topology.ClusterTopologyWithBucket; +import com.couchbase.client.core.topology.HostAndServicePorts; import com.couchbase.client.core.util.CbThrowables; -import com.couchbase.client.core.util.HostAndPort; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.util.annotation.Nullable; import java.time.Duration; +import java.util.ArrayList; import java.util.Collection; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.UUID; @@ -65,7 +64,6 @@ import static com.couchbase.client.core.util.CbCollections.transformValues; import static java.util.Collections.unmodifiableSet; import static java.util.stream.Collectors.groupingBy; -import static java.util.stream.Collectors.toSet; /** * The {@link HealthPinger} allows to "ping" individual services with real operations for their health. @@ -86,6 +84,11 @@ public class HealthPinger { ANALYTICS )); + private static final Set servicesThatRequireBucket = unmodifiableSet(EnumSet.of( + KV, + VIEWS + )); + @Stability.Internal public static Mono ping( final Core core, @@ -143,85 +146,59 @@ static Set extractPingTargets( final Optional bucketName, final WaitUntilReadyHelper.WaitUntilReadyLogger log ) { - final Set serviceTypes = isNullOrEmpty(serviceTypesOrEmpty) + final String bucket = bucketName.orElse(null); + final Set serviceTypeFilter = isNullOrEmpty(serviceTypesOrEmpty) ? 
EnumSet.allOf(ServiceType.class) : EnumSet.copyOf(serviceTypesOrEmpty); - serviceTypes.retainAll(pingableServices); // narrow to the ones we can actually ping - - Set targets = new HashSet<>(); - log.message("extractPingTargets: starting ping target extraction with candidate services: " + serviceTypes); - - if (!bucketName.isPresent()) { - if (clusterConfig.globalConfig() != null) { - GlobalConfig globalConfig = clusterConfig.globalConfig(); + serviceTypeFilter.retainAll(pingableServices); // narrow to the ones we can actually ping - log.message("extractPingTargets: getting ping targets from global config portInfos: " + globalConfig.portInfos()); - for (PortInfo portInfo : globalConfig.portInfos()) { - for (ServiceType serviceType : portInfo.ports().keySet()) { - if (serviceType == ServiceType.KV || serviceType == ServiceType.VIEWS) { - // do not check bucket-level resources from a global level (null bucket name will not work) - continue; - } - RequestTarget target = new RequestTarget(serviceType, portInfo.identifier(), null); - log.message("extractPingTargets: adding target from global config: " + target); - targets.add(target); - } - } - log.message("extractPingTargets: ping targets after scanning global config: " + targets); - } else { - log.message("extractPingTargets: globalConfig is absent"); - } - for (Map.Entry bucketConfig : clusterConfig.bucketConfigs().entrySet()) { - log.message("extractPingTargets: getting targets from bucket config via global config for bucket " + bucketConfig.getKey() + " : " + bucketConfig.getValue()); + log.message("extractPingTargets: starting ping target extraction with candidate services: " + serviceTypeFilter + " and bucket: " + bucket); - for (NodeInfo nodeInfo : bucketConfig.getValue().nodes()) { - for (ServiceType serviceType : nodeInfo.services().keySet()) { - if (serviceType == ServiceType.KV || serviceType == ServiceType.VIEWS) { - // do not check bucket-level resources from a global level (null bucket name will not work) - continue; - } - RequestTarget target = new RequestTarget(serviceType, nodeInfo.identifier(), null); - log.message("extractPingTargets: adding target from bucket config via global config: " + target); - targets.add(new RequestTarget(serviceType, nodeInfo.identifier(), null)); - } - } + final List topologiesToScan = new ArrayList<>(); + if (bucket != null) { + ClusterTopologyWithBucket topology = clusterConfig.bucketTopology(bucket); + if (topology != null && !topology.bucket().hasCapability(BucketCapability.COUCHAPI)) { + serviceTypeFilter.remove(VIEWS); } + topologiesToScan.add(topology); } else { - BucketConfig bucketConfig = clusterConfig.bucketConfig(bucketName.get()); - if (bucketConfig != null) { - log.message("extractPingTargets: Getting targets from bucket config: " + bucketConfig); - for (NodeInfo nodeInfo : bucketConfig.nodes()) { - for (ServiceType serviceType : nodeInfo.services().keySet()) { - RequestTarget target; - if (serviceType != ServiceType.VIEWS && serviceType != ServiceType.KV) { - target = new RequestTarget(serviceType, nodeInfo.identifier(), null); - } else { - target = new RequestTarget(serviceType, nodeInfo.identifier(), bucketName.get()); - } + serviceTypeFilter.removeAll(servicesThatRequireBucket); // narrow to the ones that can be pinged without a bucket + topologiesToScan.add(clusterConfig.globalTopology()); + topologiesToScan.addAll(clusterConfig.bucketTopologies()); + } - log.message("extractPingTargets: adding target from bucket config: " + target); - targets.add(target); + Set result = new 
HashSet<>(); + topologiesToScan.stream() + .filter(Objects::nonNull) // global or specific bucket topology might be absent + .forEach(topology -> { + log.message("extractPingTargets: scanning " + describe(topology)); + for (HostAndServicePorts node : topology.nodes()) { + for (ServiceType advertisedService : advertisedServices(node)) { + if (serviceTypeFilter.contains(advertisedService)) { + boolean serviceRequiresBucket = servicesThatRequireBucket.contains(advertisedService); + String targetBucketOrNull = serviceRequiresBucket ? bucket : null; + RequestTarget target = new RequestTarget(advertisedService, node.id(), targetBucketOrNull); + if (result.add(target)) { + log.message("extractPingTargets: found new target " + target); + } + } } } - } else { - log.message("extractPingTargets: Bucket name was present, but clusterConfig has no config for bucket " + bucketName); - } - } - - // Narrow the results to only pingable services the caller is interested in. - targets = targets.stream() - .filter(t -> serviceTypes.contains(t.serviceType())) - .collect(toSet()); + }); - log.message( - "extractPingTargets: Finished. Returning filtered targets (grouped by node): " + formatGroupedByNode(targets) - ); + log.message("extractPingTargets: Finished. Returning filtered targets (grouped by node): " + formatGroupedByNode(result)); + return result; + } - return targets; + private static String describe(ClusterTopology topology) { + String bucketOrGlobal = (topology instanceof ClusterTopologyWithBucket) + ? "bucket '" + topology.requireBucket().bucket().name() + "'" + : "global"; + return "topology from " + bucketOrGlobal + " ; nodes=" + topology.nodes(); } - private static String format(NodeIdentifier id) { - return new HostAndPort(id.address(), id.managerPort()).format(); + private static Set advertisedServices(HostAndServicePorts node) { + return node.ports().keySet(); } /** @@ -230,7 +207,7 @@ private static String format(NodeIdentifier id) { */ static Map> formatGroupedByNode(Collection targets) { Map> grouped = targets.stream() - .collect(Collectors.groupingBy(requestTarget -> redactSystem(format(requestTarget.nodeIdentifier())).toString())); + .collect(Collectors.groupingBy(requestTarget -> redactSystem(requestTarget.nodeIdentifier()).toString())); return transformValues(grouped, it -> transform(it, target -> target.serviceType().toString())); } diff --git a/core-io/src/main/java/com/couchbase/client/core/endpoint/DeferredCloseEndpoint.java b/core-io/src/main/java/com/couchbase/client/core/endpoint/DeferredCloseEndpoint.java index f3bd578eb..23aa4b7cb 100644 --- a/core-io/src/main/java/com/couchbase/client/core/endpoint/DeferredCloseEndpoint.java +++ b/core-io/src/main/java/com/couchbase/client/core/endpoint/DeferredCloseEndpoint.java @@ -61,10 +61,21 @@ private void closeWhenDone() { @Stability.Internal @Override public synchronized void markRequestCompletion() { + maybeResumeDisconnect(); super.markRequestCompletion(); - if (closeWhenDone && outstandingRequests() <= 0) { + } + + @Stability.Internal + @Override + public synchronized void notifyChannelInactive() { + maybeResumeDisconnect(); + super.notifyChannelInactive(); + } + + private void maybeResumeDisconnect() { + if (closeWhenDone) { endpointContext.get().environment().eventBus().publish(new EndpointDisconnectResumedEvent(endpointContext.get())); - closeChannel(this.channel); + super.disconnect(); closeWhenDone = false; } } diff --git a/core-io/src/main/java/com/couchbase/client/core/endpoint/ProtostellarEndpoint.java 
b/core-io/src/main/java/com/couchbase/client/core/endpoint/ProtostellarEndpoint.java index b686418e7..516496889 100644 --- a/core-io/src/main/java/com/couchbase/client/core/endpoint/ProtostellarEndpoint.java +++ b/core-io/src/main/java/com/couchbase/client/core/endpoint/ProtostellarEndpoint.java @@ -234,9 +234,9 @@ private ManagedChannel channel(ProtostellarContext ctx) { // Retry strategies to be determined, but presumably we will need something custom rather than what GRPC provides .disableRetry(); - if (ctx.environment().requestTracer() != null - && ctx.environment().requestTracer() instanceof GrpcAwareRequestTracer) { - ((GrpcAwareRequestTracer) ctx.environment().requestTracer()).registerGrpc(builder); + if (ctx.coreResources().requestTracer() != null + && ctx.coreResources().requestTracer() instanceof GrpcAwareRequestTracer) { + ((GrpcAwareRequestTracer) ctx.coreResources().requestTracer()).registerGrpc(builder); } // JVMCBC-1187: experimental code for performance testing that will be removed pre-GA. diff --git a/core-io/src/main/java/com/couchbase/client/core/endpoint/http/CoreHttpRequest.java b/core-io/src/main/java/com/couchbase/client/core/endpoint/http/CoreHttpRequest.java index 70a1c8c49..c58a0011f 100644 --- a/core-io/src/main/java/com/couchbase/client/core/endpoint/http/CoreHttpRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/endpoint/http/CoreHttpRequest.java @@ -40,9 +40,9 @@ import com.couchbase.client.core.msg.BaseRequest; import com.couchbase.client.core.msg.NonChunkedHttpRequest; import com.couchbase.client.core.msg.RequestTarget; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.UrlQueryStringBuilder; import reactor.util.annotation.Nullable; @@ -161,7 +161,7 @@ public Map serviceContext() { ctx.put("method", method.toString()); ctx.put("path", redactMeta(pathAndQueryString())); if (target() != null) { - ctx.put("target", redactSystem(target().address())); + ctx.put("target", redactSystem(target())); } if (bucket() != null) { ctx.put("bucket", redactMeta(bucket())); @@ -320,7 +320,7 @@ public Builder content(byte[] content, CharSequence contentType) { } public CoreHttpRequest build() { - RequestSpan span = spanName == null ? null : CbTracing.newSpan(coreContext.environment().requestTracer(), spanName, options.parentSpan().orElse(null)); + RequestSpan span = spanName == null ? 
null : coreContext.coreResources().requestTracer().requestSpan(spanName, options.parentSpan().orElse(null)); if (span != null && !CbTracing.isInternalSpan(span)) { if (target.bucketName() != null) { diff --git a/core-io/src/main/java/com/couchbase/client/core/env/BuilderPropertySetter.java b/core-io/src/main/java/com/couchbase/client/core/env/BuilderPropertySetter.java index 81d0f37f3..f3f9bdd0e 100644 --- a/core-io/src/main/java/com/couchbase/client/core/env/BuilderPropertySetter.java +++ b/core-io/src/main/java/com/couchbase/client/core/env/BuilderPropertySetter.java @@ -44,11 +44,11 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; -import java.util.stream.Collectors; import static com.couchbase.client.core.util.CbCollections.mapCopyOf; import static com.couchbase.client.core.util.CbCollections.mapOf; import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.toList; @SuppressWarnings("rawtypes") @Stability.Internal @@ -60,16 +60,22 @@ public class BuilderPropertySetter { // Escape hatch in case some accessors don't follow the convention. private final Map irregularChildBuilderAccessors; + // Converts an input path component to match the Java method name, + // for translating case conventions. + private final Function pathComponentTransformer; + public BuilderPropertySetter() { - this("Config", mapOf("ioEnvironment", "ioEnvironment")); + this("Config", mapOf("ioEnvironment", "ioEnvironment"), name -> name); } public BuilderPropertySetter( String childBuilderAccessorSuffix, - Map irregularChildBuilderAccessors + Map irregularChildBuilderAccessors, + Function pathComponentTransformer ) { this.childBuilderAccessorSuffix = requireNonNull(childBuilderAccessorSuffix); this.irregularChildBuilderAccessors = mapCopyOf(irregularChildBuilderAccessors); + this.pathComponentTransformer = requireNonNull(pathComponentTransformer); } /** @@ -83,10 +89,11 @@ public void set(Object builder, Map properties) { * @throws InvalidPropertyException if the property could not be applied to the builder */ public void set(Object builder, String propertyName, String propertyValue) { - - try { - final List propertyComponents = Arrays.asList(propertyName.split("\\.", -1)); + final List propertyComponents = Arrays.stream(propertyName.split("\\.", -1)) + .map(pathComponentTransformer) + .collect(toList()); + final List pathToBuilder = propertyComponents.subList(0, propertyComponents.size() - 1); final String setterName = propertyComponents.get(propertyComponents.size() - 1); @@ -111,7 +118,7 @@ public void set(Object builder, String propertyName, String propertyValue) { final List candidates = Arrays.stream(builder.getClass().getMethods()) .filter(m -> m.getName().equals(setterName)) .filter(m -> m.getParameterCount() == 1) - .collect(Collectors.toList()); + .collect(toList()); if (candidates.isEmpty()) { throw InvalidArgumentException.fromMessage("No one-arg setter for property \"" + propertyName + "\" in " + builder.getClass()); @@ -256,7 +263,7 @@ private static E convertEnum(Class enumClass, String value) try { return (E) Enum.valueOf(enumClass, value); } catch (IllegalArgumentException e) { - List enumValueNames = Arrays.stream(enumClass.getEnumConstants()).map(Enum::name).collect(Collectors.toList()); + List enumValueNames = Arrays.stream(enumClass.getEnumConstants()).map(Enum::name).collect(toList()); throw InvalidArgumentException.fromMessage("Expected one of " + enumValueNames + " but got \"" + value + 
"\""); } } diff --git a/core-io/src/main/java/com/couchbase/client/core/env/CoreEnvironment.java b/core-io/src/main/java/com/couchbase/client/core/env/CoreEnvironment.java index 7bef1913f..2ea5c4216 100644 --- a/core-io/src/main/java/com/couchbase/client/core/env/CoreEnvironment.java +++ b/core-io/src/main/java/com/couchbase/client/core/env/CoreEnvironment.java @@ -42,9 +42,12 @@ import com.couchbase.client.core.transaction.config.CoreTransactionsConfig; import com.couchbase.client.core.transaction.forwards.CoreTransactionsSupportedExtensions; import com.couchbase.client.core.transaction.util.CoreTransactionsSchedulers; +import com.couchbase.client.core.util.ReactorOps; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.core.scheduler.Scheduler; import reactor.core.scheduler.Schedulers; +import reactor.util.annotation.Nullable; import java.time.Duration; import java.util.ArrayList; @@ -63,6 +66,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import java.util.function.Supplier; import static com.couchbase.client.core.env.OwnedOrExternal.external; import static com.couchbase.client.core.env.OwnedOrExternal.owned; @@ -75,7 +79,7 @@ * Note that unless you are using the core directly, you want to consider the child implementations for each * language binding (i.e. the ClusterEnvironment for the java client). */ -public class CoreEnvironment implements AutoCloseable { +public class CoreEnvironment implements ReactorOps, AutoCloseable { private static final VersionAndGitHash coreVersion = VersionAndGitHash.from(Core.class); private static final String CORE_AGENT_TITLE = "java-core"; @@ -112,6 +116,7 @@ public class CoreEnvironment implements AutoCloseable { private final RetryStrategy retryStrategy; private final OwnedOrExternal scheduler; private final OwnedOrExternal executor; + @Nullable private final Supplier userScheduler; private final int schedulerThreadCount; private final OrphanReporter orphanReporter; private final long maxNumRequestsInRetry; @@ -120,6 +125,7 @@ public class CoreEnvironment implements AutoCloseable { private final Set appliedProfiles; private final CoreTransactionsSchedulers transactionsSchedulers = new CoreTransactionsSchedulers(); + private final @Nullable String preferredServerGroup; public static CoreEnvironment create() { return builder().build(); @@ -138,6 +144,7 @@ protected CoreEnvironment(final Builder builder) { .orElse(owned( Schedulers.newParallel("cb-comp", schedulerThreadCount, true)) ); + this.userScheduler = builder.userScheduler; // JVMCBC-1196: configuration options for the executor will be provided. String executorMaxThreadCountRaw = System.getProperty("com.couchbase.protostellar.executorMaxThreadCount"); @@ -208,6 +215,7 @@ protected CoreEnvironment(final Builder builder) { } this.requestCallbacks = Collections.unmodifiableList(builder.requestCallbacks); + this.preferredServerGroup = builder.preferredServerGroup; checkInsecureTlsConfig(); } @@ -351,6 +359,26 @@ public Scheduler scheduler() { return scheduler.get(); } + /** + * Returns the supplier for the scheduler where Reactive API results should be published, + * or null if the user does not want to switch schedulers. + */ + @Stability.Internal + @Nullable + public Supplier userScheduler() { + return userScheduler; + } + + @Stability.Internal + public Mono publishOnUserScheduler(Mono mono) { + return userScheduler == null ? 
mono : Mono.defer(() -> mono.publishOn(userScheduler.get())); + } + + @Stability.Internal + public Flux publishOnUserScheduler(Flux flux) { + return userScheduler == null ? flux : Flux.defer(() -> flux.publishOn(userScheduler.get())); + } + /** * Returns the executor used to schedule non-reactive async tasks across the SDK. */ @@ -363,8 +391,13 @@ public Executor executor() { * Returns the request tracer for response time observability. *
<p>
* Note that this right now is unsupported, volatile API and subject to change! + * + * @deprecated consumers should use {@link com.couchbase.client.core.CoreResources} instead, as the RequestTracer returned + * from that adds useful additional spans that are specific to a Core, and cannot be added here (as this class can be + * shared between Clusters). */ @Stability.Volatile + @Deprecated public RequestTracer requestTracer() { return requestTracer.get(); } @@ -420,6 +453,13 @@ public CoreTransactionsSchedulers transactionsSchedulers() { return transactionsSchedulers; } + /** + * The preferred server group to use for operations that support such. + */ + public @Nullable String preferredServerGroup() { + return preferredServerGroup; + } + /** * Shuts down this Environment with the default disconnect timeout. * @@ -587,6 +627,7 @@ public static class Builder> { private LoggingMeterConfig.Builder loggingMeterConfig = new LoggingMeterConfig.Builder(); private OwnedOrExternal eventBus = null; private OwnedOrExternal scheduler = null; + private Supplier userScheduler = null; private int schedulerThreadCount = Schedulers.DEFAULT_POOL_SIZE; private OwnedOrExternal requestTracer = null; private OwnedOrExternal meter = null; @@ -594,6 +635,7 @@ public static class Builder> { private long maxNumRequestsInRetry = DEFAULT_MAX_NUM_REQUESTS_IN_RETRY; private final List requestCallbacks = new ArrayList<>(); protected CoreTransactionsConfig transactionsConfig = null; + private String preferredServerGroup = null; private final Set appliedProfiles = new LinkedHashSet<>(); @@ -892,6 +934,22 @@ public SELF thresholdLoggingTracerConfig(final ThresholdLoggingTracerConfig.Buil return self(); } + /** + * Specifies the supplier the SDK uses to get the Scheduler for publishing Reactive API results. + *
<p>
+ * Defaults to null, which means reactive results are published immediately + * in a thread owned by the SDK -- typically the SDK's Netty event loop. + *
<p>
+ * The supplier is invoked once for every subscription, by the same thread that subscribes to the Mono/Flux. + * + * @return this {@link Builder} for chaining purposes. + */ + @Stability.Volatile + public SELF publishOnScheduler(@Nullable final Supplier publishOnScheduler) { + this.userScheduler = publishOnScheduler; + return self(); + } + public ThresholdLoggingTracerConfig.Builder thresholdLoggingTracerConfig() { return thresholdLoggingTracerConfig; } @@ -1069,8 +1127,7 @@ public SELF retryStrategy(final RetryStrategy retryStrategy) { public SELF requestTracer(final RequestTracer requestTracer) { notNull(requestTracer, "RequestTracer"); - boolean ignoresAttributes = CbTracing.isInternalTracer(requestTracer); - this.requestTracer = external(ignoresAttributes ? requestTracer : new RequestTracerWithCommonAttributes(requestTracer)); + this.requestTracer = external(requestTracer); return self(); } @@ -1150,6 +1207,16 @@ public SELF applyProfile(final String profileName) { + registeredProfileNames()); } + /** + * Sets a preferred server group, that will be used for operations that support this feature. + * + * @return this {@link Builder} for chaining purposes. + */ + public SELF preferredServerGroup(final @Nullable String preferredServerGroup) { + this.preferredServerGroup = preferredServerGroup; + return self(); + } + /** * You might wonder why callers can't use * {@link #load(PropertyLoader)} to load system properties. diff --git a/core-io/src/main/java/com/couchbase/client/core/env/IoConfig.java b/core-io/src/main/java/com/couchbase/client/core/env/IoConfig.java index 187733d5b..c33a00aee 100644 --- a/core-io/src/main/java/com/couchbase/client/core/env/IoConfig.java +++ b/core-io/src/main/java/com/couchbase/client/core/env/IoConfig.java @@ -24,6 +24,7 @@ import com.couchbase.client.core.node.StandardMemcachedHashingStrategy; import com.couchbase.client.core.service.AbstractPooledEndpointServiceConfig; import com.couchbase.client.core.service.ServiceType; +import reactor.util.annotation.Nullable; import java.time.Duration; import java.util.Arrays; diff --git a/core-io/src/main/java/com/couchbase/client/core/env/PasswordAuthenticator.java b/core-io/src/main/java/com/couchbase/client/core/env/PasswordAuthenticator.java index 88cd7e953..9e8aefa19 100644 --- a/core-io/src/main/java/com/couchbase/client/core/env/PasswordAuthenticator.java +++ b/core-io/src/main/java/com/couchbase/client/core/env/PasswordAuthenticator.java @@ -227,7 +227,7 @@ private Builder(String username, String password) { } /** - * @deprecated Please use {@link Builder(String, String) or {@link Builder(Supplier)} instead. + * @deprecated Please use {@link Builder(String, String)} or {@link Builder(Supplier)} instead. 
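// Editor's illustrative sketch, not part of this patch: combining the new CoreEnvironment builder options
// added above. The scheduler supplier and the server group name "Group 1" are hypothetical examples;
// publishOnScheduler expects a Supplier of a Reactor Scheduler, preferredServerGroup a plain group name.
CoreEnvironment env = CoreEnvironment.builder()
  // publish Reactive API results on a caller-chosen scheduler instead of the SDK's Netty event loop
  .publishOnScheduler(() -> reactor.core.scheduler.Schedulers.boundedElastic())
  // used by operations that support targeting a preferred server group, per the builder javadoc
  .preferredServerGroup("Group 1")
  .build();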
*/ @Deprecated public Builder() { diff --git a/core-io/src/main/java/com/couchbase/client/core/env/RequestTracerWithCommonAttributes.java b/core-io/src/main/java/com/couchbase/client/core/env/RequestTracerDecorator.java similarity index 64% rename from core-io/src/main/java/com/couchbase/client/core/env/RequestTracerWithCommonAttributes.java rename to core-io/src/main/java/com/couchbase/client/core/env/RequestTracerDecorator.java index c652eefaf..b4a11f5d6 100644 --- a/core-io/src/main/java/com/couchbase/client/core/env/RequestTracerWithCommonAttributes.java +++ b/core-io/src/main/java/com/couchbase/client/core/env/RequestTracerDecorator.java @@ -16,29 +16,41 @@ package com.couchbase.client.core.env; +import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.RequestTracer; import com.couchbase.client.core.cnc.TracingIdentifiers; +import com.couchbase.client.core.topology.ClusterIdentifier; import reactor.core.publisher.Mono; +import reactor.util.annotation.Nullable; import java.time.Duration; +import java.util.function.Supplier; import static java.util.Objects.requireNonNull; /** - * Applies a set of common attributes to spans created by this tracer. + * Wraps a {@link RequestTracer} to provide new attributes. */ -class RequestTracerWithCommonAttributes implements RequestTracer { +@Stability.Internal +public class RequestTracerDecorator implements RequestTracer { private final RequestTracer wrapped; + private final Supplier clusterIdentSupplier; - RequestTracerWithCommonAttributes(RequestTracer wrapped) { + public RequestTracerDecorator(RequestTracer wrapped, Supplier clusterIdentSupplier) { this.wrapped = requireNonNull(wrapped); + this.clusterIdentSupplier = clusterIdentSupplier; } @Override public RequestSpan requestSpan(String name, RequestSpan parent) { RequestSpan span = wrapped.requestSpan(name, parent); span.lowCardinalityAttribute(TracingIdentifiers.ATTR_SYSTEM, TracingIdentifiers.ATTR_SYSTEM_COUCHBASE); + ClusterIdentifier clusterIdent = clusterIdentSupplier.get(); + if (clusterIdent != null) { + span.attribute(TracingIdentifiers.ATTR_CLUSTER_NAME, clusterIdent.clusterName()); + span.attribute(TracingIdentifiers.ATTR_CLUSTER_UUID, clusterIdent.clusterUuid()); + } return span; } diff --git a/core-io/src/main/java/com/couchbase/client/core/error/DocumentUnretrievableException.java b/core-io/src/main/java/com/couchbase/client/core/error/DocumentUnretrievableException.java index 03193c9f4..0b1a3d05d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/error/DocumentUnretrievableException.java +++ b/core-io/src/main/java/com/couchbase/client/core/error/DocumentUnretrievableException.java @@ -28,4 +28,11 @@ public DocumentUnretrievableException(final ErrorContext ctx) { super("No document retrievable with a successful status", ctx); } + public DocumentUnretrievableException(final String message, final ErrorContext ctx) { + super(message, ctx); + } + + public static DocumentUnretrievableException noReplicasSuitable() { + return new DocumentUnretrievableException("No suitable replicas were available. Note that it is advised to always have a try-catch fallback to e.g. 
a regular get, when using replica gets", null); + } } diff --git a/core-io/src/main/java/com/couchbase/client/core/io/netty/NonChunkedHttpMessageHandler.java b/core-io/src/main/java/com/couchbase/client/core/io/netty/NonChunkedHttpMessageHandler.java index a0602d3ce..360a53ebd 100644 --- a/core-io/src/main/java/com/couchbase/client/core/io/netty/NonChunkedHttpMessageHandler.java +++ b/core-io/src/main/java/com/couchbase/client/core/io/netty/NonChunkedHttpMessageHandler.java @@ -163,7 +163,7 @@ public void write(final ChannelHandlerContext ctx, final Object msg, final Chann encoded.headers().set(HttpHeaderNames.USER_AGENT, endpointContext.environment().userAgent().formattedLong()); dispatchTimingStart = System.nanoTime(); if (currentRequest.requestSpan() != null) { - RequestTracer tracer = endpointContext.environment().requestTracer(); + RequestTracer tracer = endpointContext.coreResources().requestTracer(); currentDispatchSpan = tracer.requestSpan(TracingIdentifiers.SPAN_DISPATCH, currentRequest.requestSpan()); if (!CbTracing.isInternalTracer(tracer)) { diff --git a/core-io/src/main/java/com/couchbase/client/core/io/netty/chunk/ChunkedMessageHandler.java b/core-io/src/main/java/com/couchbase/client/core/io/netty/chunk/ChunkedMessageHandler.java index b4516d002..d811880e6 100644 --- a/core-io/src/main/java/com/couchbase/client/core/io/netty/chunk/ChunkedMessageHandler.java +++ b/core-io/src/main/java/com/couchbase/client/core/io/netty/chunk/ChunkedMessageHandler.java @@ -165,7 +165,7 @@ public void write(final ChannelHandlerContext ctx, final Object msg, final Chann chunkResponseParser.updateRequestContext(currentRequest.context()); dispatchTimingStart = System.nanoTime(); if (currentRequest.requestSpan() != null) { - RequestTracer tracer = endpointContext.environment().requestTracer(); + RequestTracer tracer = endpointContext.coreResources().requestTracer(); currentDispatchSpan = tracer.requestSpan(TracingIdentifiers.SPAN_DISPATCH, currentRequest.requestSpan()); if (!CbTracing.isInternalTracer(tracer)) { diff --git a/core-io/src/main/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandler.java b/core-io/src/main/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandler.java index eadcb8418..c4446dc0b 100644 --- a/core-io/src/main/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandler.java +++ b/core-io/src/main/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandler.java @@ -172,7 +172,7 @@ public KeyValueMessageHandler(final BaseEndpoint endpoint, final EndpointContext this.compressionConfig = endpointContext.environment().compressionConfig(); this.eventBus = endpointContext.environment().eventBus(); this.bucketName = bucketName; - this.isInternalTracer = CbTracing.isInternalTracer(endpointContext.environment().requestTracer()); + this.isInternalTracer = CbTracing.isInternalTracer(endpointContext.coreResources().requestTracer()); } /** @@ -226,7 +226,7 @@ public void write(final ChannelHandlerContext ctx, final Object msg, final Chann ctx.write(request.encode(ctx.alloc(), opaque, channelContext), promise); writtenRequestDispatchTimings.put(opaque, (Long) System.nanoTime()); if (request.requestSpan() != null) { - RequestTracer tracer = endpointContext.environment().requestTracer(); + RequestTracer tracer = endpointContext.coreResources().requestTracer(); RequestSpan dispatchSpan = tracer.requestSpan(TracingIdentifiers.SPAN_DISPATCH, request.requestSpan()); if (!isInternalTracer) { diff --git 
a/core-io/src/main/java/com/couchbase/client/core/msg/Request.java b/core-io/src/main/java/com/couchbase/client/core/msg/Request.java index ccb6c3bcb..6a5288827 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/Request.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/Request.java @@ -18,9 +18,9 @@ import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.deps.io.netty.util.Timeout; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.util.annotation.Nullable; import java.time.Duration; diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/RequestContext.java b/core-io/src/main/java/com/couchbase/client/core/msg/RequestContext.java index 1c86e6bea..1d0e2dd22 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/RequestContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/RequestContext.java @@ -25,7 +25,7 @@ import com.couchbase.client.core.cnc.metrics.NoopMeter; import com.couchbase.client.core.env.Authenticator; import com.couchbase.client.core.env.CoreEnvironment; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.retry.RetryReason; import com.couchbase.client.core.util.HostAndPort; import reactor.util.annotation.Nullable; @@ -404,7 +404,7 @@ public void injectExportableParams(final Map input) { if (lastDispatchedTo != null) { input.put("lastDispatchedTo", redactSystem(lastDispatchedTo)); } else if (lastDispatchedToNode != null) { - input.put("lastDispatchedTo", redactSystem(lastDispatchedToNode.address())); + input.put("lastDispatchedTo", redactSystem(lastDispatchedToNode)); } if (lastDispatchedFrom != null) { diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/RequestTarget.java b/core-io/src/main/java/com/couchbase/client/core/msg/RequestTarget.java index 055f035f1..f556d107a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/RequestTarget.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/RequestTarget.java @@ -17,8 +17,8 @@ package com.couchbase.client.core.msg; import com.couchbase.client.core.annotation.Stability; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import java.util.Objects; @@ -97,7 +97,7 @@ public NodeIdentifier nodeIdentifier() { @Override public String toString() { - String base = serviceType + "@" + redactSystem(nodeIdentifier.address() + ":" + nodeIdentifier.managerPort()); + String base = serviceType + "@" + redactSystem(nodeIdentifier); return bucketName == null ? 
base : base + "/" + redactMeta(bucketName); } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierBucketConfigRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierBucketConfigRequest.java index b1f5b8207..05b9b1dc7 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierBucketConfigRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierBucketConfigRequest.java @@ -24,8 +24,8 @@ import com.couchbase.client.core.io.netty.kv.KeyValueChannelContext; import com.couchbase.client.core.msg.TargetedRequest; import com.couchbase.client.core.msg.UnmonitoredRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.util.annotation.Nullable; import java.time.Duration; @@ -87,7 +87,7 @@ public boolean idempotent() { public Map serviceContext() { final Map ctx = super.serviceContext(); if (target != null) { - ctx.put("target", redactSystem(target.address())); + ctx.put("target", redactSystem(target)); } return ctx; } @@ -100,7 +100,7 @@ public String name() { @Override public String toString() { return "CarrierBucketConfigRequest{" + - "target=" + redactSystem(target.address()) + + "target=" + redactSystem(target) + ", bucket=" + redactMeta(collectionIdentifier().bucket()) + ", ifNewerThan=" + ifNewerThan + '}'; diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierGlobalConfigRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierGlobalConfigRequest.java index 909dbea79..d7abcafb7 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierGlobalConfigRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/CarrierGlobalConfigRequest.java @@ -25,7 +25,7 @@ import com.couchbase.client.core.io.netty.kv.MemcacheProtocol; import com.couchbase.client.core.msg.TargetedRequest; import com.couchbase.client.core.msg.UnmonitoredRequest; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; import reactor.util.annotation.Nullable; @@ -130,7 +130,7 @@ public boolean idempotent() { public Map serviceContext() { final Map ctx = super.serviceContext(); if (target != null) { - ctx.put("target", redactSystem(target.address())); + ctx.put("target", redactSystem(target)); } return ctx; } @@ -143,7 +143,7 @@ public String name() { @Override public String toString() { return "CarrierGlobalConfigRequest{" + - "target=" + redactSystem(target.address()) + + "target=" + redactSystem(target) + ", ifNewerThan=" + ifNewerThan + '}'; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/KeyValueRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/KeyValueRequest.java index 7fa5f7e68..1396b0c88 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/KeyValueRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/KeyValueRequest.java @@ -45,6 +45,19 @@ public interface KeyValueRequest extends Request, ScopedR */ void partition(short partition); + /** + * Returns the index of the replica set member this request targets. + *

<ul>
+ *   <li>0 = primary ("active")
+ *   <li>1 = first replica
+ *   <li>2 = second replica
+ *   <li>3 = third replica
+ * </ul>
+ */ + default int replica() { + return 0; + } + /** * Encode this request with the given allocator and opaque. * diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/KvPingRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/KvPingRequest.java index 364bd06cd..5f3159c55 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/KvPingRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/KvPingRequest.java @@ -22,14 +22,12 @@ import com.couchbase.client.core.io.netty.kv.KeyValueChannelContext; import com.couchbase.client.core.io.netty.kv.MemcacheProtocol; import com.couchbase.client.core.msg.TargetedRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; +import com.couchbase.client.core.topology.NodeIdentifier; import java.time.Duration; import java.util.Map; -import java.util.TreeMap; -import static com.couchbase.client.core.logging.RedactableArgument.redactMeta; import static com.couchbase.client.core.logging.RedactableArgument.redactSystem; public class KvPingRequest extends NoopRequest implements TargetedRequest { @@ -56,7 +54,7 @@ public NodeIdentifier target() { public Map serviceContext() { final Map ctx = super.serviceContext(); if (target != null) { - ctx.put("target", redactSystem(target.address())); + ctx.put("target", redactSystem(target)); } return ctx; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/MultiObserveViaCasRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/MultiObserveViaCasRequest.java index dea847b94..7a2bd5e6a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/MultiObserveViaCasRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/MultiObserveViaCasRequest.java @@ -25,8 +25,8 @@ import com.couchbase.client.core.io.netty.kv.MemcacheProtocol; import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.TargetedRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.UnsignedLEB128; import java.time.Duration; @@ -136,7 +136,7 @@ public String name() { @Override public Map serviceContext() { final Map parentCtx = super.serviceContext(); - parentCtx.put("target", target.address()); + parentCtx.put("target", target); parentCtx.put("numKeys", keys.size()); return parentCtx; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaCasRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaCasRequest.java index 6d5489d6a..f7af2775a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaCasRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaCasRequest.java @@ -44,6 +44,7 @@ public ObserveViaCasRequest(final Duration timeout, final CoreContext ctx, Colle this.replica = replica; } + @Override public int replica() { return replica; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaSeqnoRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaSeqnoRequest.java index a89ec6e32..c2bde2f08 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaSeqnoRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ObserveViaSeqnoRequest.java @@ -52,6 +52,7 @@ public ObserveViaSeqnoRequest(final Duration timeout, 
final CoreContext ctx, Col } } + @Override public int replica() { return replica; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCancelRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCancelRequest.java index 77f4a8d6b..f996851b6 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCancelRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCancelRequest.java @@ -54,7 +54,7 @@ public RangeScanCancelRequest(CoreRangeScanId id, options.commonOptions().retryStrategy().orElse(ctx.environment().retryStrategy()), null, collectionIdentifier, - ctx.environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CANCEL, options.commonOptions().parentSpan().orElse(null))); + ctx.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CANCEL, options.commonOptions().parentSpan().orElse(null))); this.id = id; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanContinueRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanContinueRequest.java index da45c1c74..4fbd90fb7 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanContinueRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanContinueRequest.java @@ -62,7 +62,7 @@ public RangeScanContinueRequest(CoreRangeScanId id, options.commonOptions().retryStrategy().orElse(ctx.environment().retryStrategy()), key, collectionIdentifier, - ctx.environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CONTINUE, options.commonOptions().parentSpan().orElse(null))); + ctx.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CONTINUE, options.commonOptions().parentSpan().orElse(null))); this.id = id; this.itemLimit = options.batchItemLimit(); this.byteLimit = options.batchByteLimit(); diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCreateRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCreateRequest.java index ba924f97b..5c46ec40a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCreateRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/RangeScanCreateRequest.java @@ -88,7 +88,7 @@ public static RangeScanCreateRequest forRangeScan(byte[] startTerm, ctx, options.commonOptions().retryStrategy().orElse(ctx.environment().retryStrategy()), collectionIdentifier, - ctx.environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CREATE, options.commonOptions().parentSpan().orElse(null)), + ctx.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CREATE, options.commonOptions().parentSpan().orElse(null)), partition, Optional.ofNullable(consistencyMap.get(partition))); } @@ -111,7 +111,7 @@ public static RangeScanCreateRequest forSamplingScan(CoreSamplingScan samplingSc ctx, options.commonOptions().retryStrategy().orElse(ctx.environment().retryStrategy()), collectionIdentifier, - ctx.environment().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CREATE, options.commonOptions().parentSpan().orElse(null)), + ctx.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_RANGE_SCAN_CREATE, options.commonOptions().parentSpan().orElse(null)), partition, Optional.ofNullable(consistencyMap.get(partition))); } diff --git 
a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaGetRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaGetRequest.java index 47766cf6c..c681af08b 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaGetRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaGetRequest.java @@ -50,7 +50,8 @@ public ReplicaGetRequest(final String key, final Duration timeout, } } - public short replica() { + @Override + public int replica() { return replica; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaSubdocGetRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaSubdocGetRequest.java index 1b1082bef..3c719cabb 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaSubdocGetRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/ReplicaSubdocGetRequest.java @@ -50,7 +50,8 @@ public static ReplicaSubdocGetRequest create(final Duration timeout, final CoreC } } - public short replica() { + @Override + public int replica() { return replica; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetRequest.java index 9a4379068..64f5bc370 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetRequest.java @@ -17,6 +17,7 @@ package com.couchbase.client.core.msg.kv; import com.couchbase.client.core.CoreContext; +import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.api.kv.CoreSubdocGetCommand; import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.TracingIdentifiers; @@ -102,6 +103,20 @@ static List convertCommands(List commands) { return result; } + @Stability.Internal + public static List convertCommandsToCore(List commands) { + List result = new ArrayList<>(commands.size()); + for (Command cmd : commands) { + result.add(new CoreSubdocGetCommand( + cmd.type, + cmd.path, + cmd.xattr, + cmd.binary + )); + } + return result; + } + @Override public ByteBuf encode(ByteBufAllocator alloc, int opaque, KeyValueChannelContext ctx) { ByteBuf key = null; diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetResponse.java b/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetResponse.java index f8a64642f..ed36efaca 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetResponse.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/kv/SubdocGetResponse.java @@ -16,6 +16,10 @@ package com.couchbase.client.core.msg.kv; +import com.couchbase.client.core.CoreKeyspace; +import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreKvResponseMetadata; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.error.CouchbaseException; import com.couchbase.client.core.io.netty.kv.MemcacheProtocol; import com.couchbase.client.core.msg.ResponseStatus; @@ -70,4 +74,15 @@ public String toString() { ", isDeleted=" + isDeleted + '}'; } + + @Stability.Internal + public CoreSubdocGetResult toCore(CoreKeyspace keyspace, String key) { + return new CoreSubdocGetResult(keyspace, + key, + CoreKvResponseMetadata.from(flexibleExtras()), + Arrays.asList(values()), + cas(), + isDeleted() + ); + } } diff --git 
a/core-io/src/main/java/com/couchbase/client/core/msg/manager/BucketConfigRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/manager/BucketConfigRequest.java index d4569bff4..27e2b221e 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/manager/BucketConfigRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/manager/BucketConfigRequest.java @@ -25,8 +25,8 @@ import com.couchbase.client.core.endpoint.http.CoreHttpPath; import com.couchbase.client.core.env.Authenticator; import com.couchbase.client.core.msg.TargetedRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; +import com.couchbase.client.core.topology.NodeIdentifier; import java.time.Duration; import java.util.Map; @@ -84,7 +84,7 @@ public Map serviceContext() { ctx.put("type", serviceType().ident()); ctx.put("bucket", redactMeta(bucketName)); if (target != null) { - ctx.put("target", redactSystem(target.address())); + ctx.put("target", redactSystem(target)); } return ctx; } diff --git a/core-io/src/main/java/com/couchbase/client/core/msg/query/QueryRequest.java b/core-io/src/main/java/com/couchbase/client/core/msg/query/QueryRequest.java index 846508704..dbee82f3a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/msg/query/QueryRequest.java +++ b/core-io/src/main/java/com/couchbase/client/core/msg/query/QueryRequest.java @@ -36,9 +36,9 @@ import com.couchbase.client.core.msg.BaseRequest; import com.couchbase.client.core.msg.HttpRequest; import com.couchbase.client.core.msg.ResponseStatus; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; diff --git a/core-io/src/main/java/com/couchbase/client/core/node/KeyValueLocator.java b/core-io/src/main/java/com/couchbase/client/core/node/KeyValueLocator.java index 8ed561cd8..a48b864e9 100644 --- a/core-io/src/main/java/com/couchbase/client/core/node/KeyValueLocator.java +++ b/core-io/src/main/java/com/couchbase/client/core/node/KeyValueLocator.java @@ -30,14 +30,12 @@ import com.couchbase.client.core.msg.Response; import com.couchbase.client.core.msg.kv.DurabilityLevel; import com.couchbase.client.core.msg.kv.KeyValueRequest; -import com.couchbase.client.core.msg.kv.ObserveViaSeqnoRequest; import com.couchbase.client.core.msg.kv.PredeterminedPartitionRequest; -import com.couchbase.client.core.msg.kv.ReplicaGetRequest; -import com.couchbase.client.core.msg.kv.ReplicaSubdocGetRequest; import com.couchbase.client.core.msg.kv.SyncDurabilityRequest; import com.couchbase.client.core.retry.AuthErrorDecider; import com.couchbase.client.core.retry.RetryOrchestrator; import com.couchbase.client.core.retry.RetryReason; +import com.couchbase.client.core.topology.NodeIdentifier; import java.util.List; import java.util.Optional; @@ -155,7 +153,7 @@ private static void couchbaseBucket(final KeyValueRequest request, final List NodeInfo nodeInfo = config.nodeAtIndex(nodeId); for (Node node : nodes) { - if (node.identifier().equals(nodeInfo.identifier())) { + if (node.identifier().equals(nodeInfo.id())) { node.send(request); return; } @@ -204,15 +202,10 @@ private static int calculateNodeId(int partitionId, final KeyValueRequest req // having the request being stuck on the server side during rebalance. 
boolean useFastForward = config.hasFastForwardMap() && request.rejectedWithNotMyVbucket() > 0; - if (request instanceof ReplicaGetRequest) { - return config.nodeIndexForReplica(partitionId, ((ReplicaGetRequest) request).replica() - 1, useFastForward); - } else if (request instanceof ReplicaSubdocGetRequest) { - return config.nodeIndexForReplica(partitionId, ((ReplicaSubdocGetRequest) request).replica() - 1, useFastForward); - } else if (request instanceof ObserveViaSeqnoRequest && ((ObserveViaSeqnoRequest) request).replica() > 0) { - return config.nodeIndexForReplica(partitionId, ((ObserveViaSeqnoRequest) request).replica() - 1, useFastForward); - } else { - return config.nodeIndexForActive(partitionId, useFastForward); - } + int replica = request.replica(); + return replica == 0 + ? config.nodeIndexForActive(partitionId, useFastForward) + : config.nodeIndexForReplica(partitionId, replica - 1, useFastForward); } @@ -229,7 +222,7 @@ private static void memcacheBucket(final KeyValueRequest request, final List< return; } - NodeIdentifier identifier = config.nodeForKey(request.key()).identifier(); + NodeIdentifier identifier = config.nodeForKey(request.key()).id(); request.partition((short) 0); for (Node node : nodes) { diff --git a/core-io/src/main/java/com/couchbase/client/core/node/Node.java b/core-io/src/main/java/com/couchbase/client/core/node/Node.java index 0bcb95aa8..08e020123 100644 --- a/core-io/src/main/java/com/couchbase/client/core/node/Node.java +++ b/core-io/src/main/java/com/couchbase/client/core/node/Node.java @@ -54,10 +54,11 @@ import com.couchbase.client.core.service.ServiceType; import com.couchbase.client.core.service.ViewService; import com.couchbase.client.core.service.ViewServiceConfig; +import com.couchbase.client.core.topology.NodeIdentifier; +import com.couchbase.client.core.util.AtomicEnumSet; import com.couchbase.client.core.util.CompositeStateful; import com.couchbase.client.core.util.HostAndPort; import com.couchbase.client.core.util.NanoTimestamp; -import com.couchbase.client.core.util.AtomicEnumSet; import com.couchbase.client.core.util.Stateful; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; diff --git a/core-io/src/main/java/com/couchbase/client/core/node/NodeContext.java b/core-io/src/main/java/com/couchbase/client/core/node/NodeContext.java index 4647347c2..f459b97f2 100644 --- a/core-io/src/main/java/com/couchbase/client/core/node/NodeContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/node/NodeContext.java @@ -17,6 +17,7 @@ package com.couchbase.client.core.node; import com.couchbase.client.core.CoreContext; +import com.couchbase.client.core.topology.NodeIdentifier; import java.util.Map; @@ -24,25 +25,27 @@ public class NodeContext extends CoreContext { - /** - * The hostname of this node. - */ - private final NodeIdentifier nodeIdentifier; + private final com.couchbase.client.core.node.NodeIdentifier legacyNodeIdentifier; public NodeContext(CoreContext ctx, NodeIdentifier nodeIdentifier) { super(ctx.core(), ctx.id(), ctx.environment(), ctx.authenticator()); - this.nodeIdentifier = nodeIdentifier; + this.legacyNodeIdentifier = nodeIdentifier.toLegacy(); } + /** + * @deprecated This is the node's canonical hostname; it's not useful by itself, + * since it does not uniquely identify a node. 
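For context on the replica-read plumbing in the hunks above: the new DocumentUnretrievableException.noReplicasSuitable() message advises callers to keep a fallback to a regular get. A minimal caller-side sketch of that pattern, using the java-client surface (the collection and id names are illustrative, not part of this diff):

    import com.couchbase.client.core.error.DocumentUnretrievableException;
    import com.couchbase.client.java.Collection;
    import com.couchbase.client.java.kv.GetResult;

    static GetResult readWithFallback(Collection collection, String id) {
      try {
        // Prefer the fastest available copy (active or any replica); this call
        // throws DocumentUnretrievableException when no copy could be retrieved.
        return collection.getAnyReplica(id);
      } catch (DocumentUnretrievableException e) {
        // Fall back to a regular get against the active copy, as the new message advises.
        return collection.get(id);
      }
    }
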
+ */ + @Deprecated public String remoteHostname() { - return nodeIdentifier.address(); + return legacyNodeIdentifier.address(); } @Override public void injectExportableParams(final Map input) { super.injectExportableParams(input); input.put("remote", redactSystem(remoteHostname())); - input.put("managerPort", redactSystem(nodeIdentifier.managerPort())); + input.put("managerPort", redactSystem(legacyNodeIdentifier.managerPort())); } diff --git a/core-io/src/main/java/com/couchbase/client/core/node/NodeIdentifier.java b/core-io/src/main/java/com/couchbase/client/core/node/NodeIdentifier.java index 9e7d4486d..3ba18345d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/node/NodeIdentifier.java +++ b/core-io/src/main/java/com/couchbase/client/core/node/NodeIdentifier.java @@ -28,14 +28,17 @@ /** * Uniquely identifies a node within the cluster, using the node's * host and manager port from the default network. + * + * @deprecated In favor of {@link com.couchbase.client.core.topology.NodeIdentifier} */ +@Deprecated public class NodeIdentifier { private final String canonicalHost; private final int canonicalManagerPort; - // Nullable only when created by a - @Nullable private final String hostForNetworkConnections; + // Null only when created by a legacy global/bucket config parser + @Nullable private final com.couchbase.client.core.topology.NodeIdentifier topologyNodeIdentifier; @Deprecated public NodeIdentifier( @@ -44,15 +47,7 @@ public NodeIdentifier( ) { this.canonicalHost = canonicalHost; this.canonicalManagerPort = canonicalManagerPort; - this.hostForNetworkConnections = null; - } - - @Stability.Internal - public static NodeIdentifier forBootstrap(String bootstrapHost, int bootstrapPort) { - // This address isn't really "canonical", since it may be an "external" address. - // If it's an external address, the node created from this identifier will be discarded - // when the config with the _real_ canonical addresses is applied. - return new NodeIdentifier(new HostAndPort(bootstrapHost, bootstrapPort), bootstrapHost); + this.topologyNodeIdentifier = null; } public NodeIdentifier( @@ -61,7 +56,7 @@ public NodeIdentifier( ) { this.canonicalHost = canonicalAddress.host(); this.canonicalManagerPort = canonicalAddress.port(); - this.hostForNetworkConnections = requireNonNull(hostForNetworkConnections); + this.topologyNodeIdentifier = new com.couchbase.client.core.topology.NodeIdentifier(canonicalHost, canonicalManagerPort, hostForNetworkConnections); } /** @@ -72,13 +67,17 @@ public NodeIdentifier( * @throws NoSuchElementException if this info is not available */ public String hostForNetworkConnections() throws NoSuchElementException { - if (hostForNetworkConnections == null) { - throw new NoSuchElementException( - "This NodeIdentifier (" + this + ") doesn't have the host to use for network connections." + - " It might have been created by a legacy config parser or some other component that did not specify it." + return asTopologyNodeIdentifier().hostForNetworkConnections(); + } + + @Stability.Internal + public com.couchbase.client.core.topology.NodeIdentifier asTopologyNodeIdentifier() { + if (topologyNodeIdentifier == null) { + throw new NoSuchElementException("This NodeIdentifier (" + this + ") doesn't have the host to use for network connections." + + " It might have been created by a legacy config parser or some other component that did not specify it." 
); } - return hostForNetworkConnections; + return topologyNodeIdentifier; } /** @@ -96,7 +95,7 @@ public int managerPort() { public String toString() { return "NodeIdentifier{" + "canonicalAddress=" + redactSystem(canonicalHost + ":" + canonicalManagerPort) + - ", hostForNetworkConnections=" + hostForNetworkConnections + + ", hostForNetworkConnections=" + (topologyNodeIdentifier == null ? null : hostForNetworkConnections()) + "}"; } diff --git a/core-io/src/main/java/com/couchbase/client/core/node/RoundRobinLocator.java b/core-io/src/main/java/com/couchbase/client/core/node/RoundRobinLocator.java index 7994a1696..733c273d3 100644 --- a/core-io/src/main/java/com/couchbase/client/core/node/RoundRobinLocator.java +++ b/core-io/src/main/java/com/couchbase/client/core/node/RoundRobinLocator.java @@ -18,9 +18,7 @@ import com.couchbase.client.core.CoreContext; import com.couchbase.client.core.cnc.events.node.NodeLocatorBugIdentifiedEvent; -import com.couchbase.client.core.config.BucketConfig; import com.couchbase.client.core.config.ClusterConfig; -import com.couchbase.client.core.config.NodeInfo; import com.couchbase.client.core.config.PortInfo; import com.couchbase.client.core.error.FeatureNotAvailableException; import com.couchbase.client.core.error.ServiceNotAvailableException; @@ -32,6 +30,7 @@ import com.couchbase.client.core.retry.RetryOrchestrator; import com.couchbase.client.core.retry.RetryReason; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import java.util.ArrayList; import java.util.List; @@ -140,21 +139,14 @@ public void dispatch(final Request request, final List private boolean serviceShowsUpInConfig(final ClusterConfig clusterConfig) { if (clusterConfig.globalConfig() != null) { for (PortInfo portInfo : clusterConfig.globalConfig().portInfos()) { - if (portInfo.ports().containsKey(serviceType)) { + if (portInfo.ports().containsKey(serviceType) || portInfo.sslPorts().containsKey(serviceType)) { return true; } } } - for (BucketConfig bucketConfig : clusterConfig.bucketConfigs().values()) { - for (NodeInfo nodeInfo : bucketConfig.nodes()) { - if (nodeInfo.services().containsKey(serviceType)) { - return true; - } - } - } - - return false; + return clusterConfig.bucketConfigs().values().stream() + .anyMatch(it -> it.serviceEnabled(serviceType)); } /** diff --git a/core-io/src/main/java/com/couchbase/client/core/node/ViewLocator.java b/core-io/src/main/java/com/couchbase/client/core/node/ViewLocator.java index caf0dee29..ceabdb914 100644 --- a/core-io/src/main/java/com/couchbase/client/core/node/ViewLocator.java +++ b/core-io/src/main/java/com/couchbase/client/core/node/ViewLocator.java @@ -68,7 +68,7 @@ protected boolean nodeCanBeUsed(final Node node, final Request ret, RequestSpan dispatchSpan = createDispatchSpan(core, request, endpoint); long start = System.nanoTime(); - AutoCloseable scope = activateSpan(Optional.empty(), dispatchSpan, core.context().environment().requestTracer()); + AutoCloseable scope = activateSpan(Optional.empty(), dispatchSpan, core.context().coreResources().requestTracer()); // Make the Protostellar call. 
ListenableFuture response = executeFutureGrpcCall.apply(endpoint); @@ -244,7 +244,7 @@ void reactiveInternal(Sinks.One ret, RequestSpan dispatchSpan = createDispatchSpan(core, request, endpoint); long start = System.nanoTime(); - AutoCloseable scope = activateSpan(Optional.empty(), dispatchSpan, core.context().environment().requestTracer()); + AutoCloseable scope = activateSpan(Optional.empty(), dispatchSpan, core.context().coreResources().requestTracer()); // Make the Protostellar call. ListenableFuture response = executeFutureGrpcCall.apply(endpoint); @@ -317,7 +317,7 @@ private static void handleDispatchSpan(@Nullable ProtostellarRequestBehaviour be private static @Nullable RequestSpan createDispatchSpan(CoreProtostellar core, ProtostellarRequest request, ProtostellarEndpoint endpoint) { - RequestTracer tracer = core.context().environment().requestTracer(); + RequestTracer tracer = core.context().coreResources().requestTracer(); RequestSpan dispatchSpan; if (!CbTracing.isInternalTracer(tracer)) { dispatchSpan = tracer.requestSpan(TracingIdentifiers.SPAN_DISPATCH, request.span()); diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/CoreProtostellarUtil.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/CoreProtostellarUtil.java index 65073f78f..2a3ace949 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/CoreProtostellarUtil.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/CoreProtostellarUtil.java @@ -186,7 +186,7 @@ public static RequestSpan createSpan(CoreProtostellar core, String spanName, CoreDurability durability, @Nullable RequestSpan parent) { - RequestSpan span = CbTracing.newSpan(core.context().environment().requestTracer(), spanName, parent); + RequestSpan span = core.context().coreResources().requestTracer().requestSpan(spanName, parent); if (!durability.isNone() && !durability.isLegacy()) { switch (durability.levelIfSynchronous().get()) { diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/ProtostellarContext.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/ProtostellarContext.java index 3a9f430b5..3c60c178a 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/ProtostellarContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/ProtostellarContext.java @@ -16,6 +16,7 @@ package com.couchbase.client.core.protostellar; +import com.couchbase.client.core.CoreResources; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.cnc.AbstractContext; import com.couchbase.client.core.env.Authenticator; @@ -34,10 +35,12 @@ public final class ProtostellarContext extends AbstractContext { private final CoreEnvironment env; private final Authenticator authenticator; + private final CoreResources coreResources; - public ProtostellarContext(final CoreEnvironment env, final Authenticator authenticator) { + public ProtostellarContext(final CoreEnvironment env, final Authenticator authenticator, final CoreResources coreResources) { this.env = requireNonNull(env); this.authenticator = requireNonNull(authenticator); + this.coreResources = requireNonNull(coreResources); if (env.securityConfig().tlsEnabled() && !authenticator.supportsTls()) { throw InvalidArgumentException.fromMessage("TLS enabled but the Authenticator does not support TLS!"); @@ -58,6 +61,10 @@ public CoreEnvironment environment() { return env; } + public CoreResources coreResources() { + return coreResources; + } + public 
Authenticator authenticator() { return authenticator; } diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/CoreProtostellarKeyValueRequests.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/CoreProtostellarKeyValueRequests.java index b15a60be4..6aae54a73 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/CoreProtostellarKeyValueRequests.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/CoreProtostellarKeyValueRequests.java @@ -369,7 +369,7 @@ public static ProtostellarRequest upsertRequest(CoreProtostellar } private static ProtostellarCoreEncodedContent encodedContent(CoreProtostellar core, Supplier content, RequestSpan span, CompressionConfig compressionConfig) { - RequestSpan encodeSpan = CbTracing.newSpan(core.context().environment().requestTracer(), TracingIdentifiers.SPAN_REQUEST_ENCODING, span); + RequestSpan encodeSpan = core.context().coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_ENCODING, span); long start = System.nanoTime(); CoreEncodedContent encoded; boolean compressed = false; diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/ProtostellarCoreKvOps.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/ProtostellarCoreKvOps.java index 832bf2963..fce5d5ea5 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/ProtostellarCoreKvOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/kv/ProtostellarCoreKvOps.java @@ -32,6 +32,7 @@ import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.api.kv.CoreSubdocMutateCommand; import com.couchbase.client.core.api.kv.CoreSubdocMutateResult; +import com.couchbase.client.core.api.kv.CoreReadPreference; import com.couchbase.client.core.endpoint.http.CoreCommonOptions; import com.couchbase.client.core.env.CompressionConfig; import com.couchbase.client.core.error.FeatureNotAvailableException; @@ -308,25 +309,25 @@ public Mono subdocGetReactive(CoreCommonOptions common, Str @Override - public Flux subdocGetAllReplicasReactive(CoreCommonOptions common, String key, List commands) { + public Flux subdocGetAllReplicasReactive(CoreCommonOptions common, String key, List commands, CoreReadPreference readPreference) { // Protostellar subdoc-from-replica support is currently incomplete. throw unsupported(); } @Override - public Mono subdocGetAnyReplicaReactive(CoreCommonOptions common, String key, List commands) { + public Mono subdocGetAnyReplicaReactive(CoreCommonOptions common, String key, List commands, CoreReadPreference readPreference) { // Protostellar subdoc-from-replica support is currently incomplete. throw unsupported(); } @Override - public Flux getAllReplicasReactive(CoreCommonOptions common, String key) { + public Flux getAllReplicasReactive(CoreCommonOptions common, String key, CoreReadPreference readPreference) { // Protostellar get-from-replica support is currently incomplete. JVMCBC-1263. throw unsupported(); } @Override - public Mono getAnyReplicaReactive(CoreCommonOptions common, String key) { + public Mono getAnyReplicaReactive(CoreCommonOptions common, String key, CoreReadPreference readPreference) { // Protostellar get-from-replica support is currently incomplete. JVMCBC-1263. 
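The environment().requestTracer() to coreResources().requestTracer() swaps in the hunks above route span creation through the tracer wrapped by RequestTracerDecorator, so spans pick up the cluster identity once a config exposes it. A rough sketch of the observable effect (core and parentSpan are placeholders; this assumes coreResources() returns the decorated tracer, as the earlier RequestTracerDecorator hunk suggests):

    RequestTracer tracer = core.context().coreResources().requestTracer();
    RequestSpan span = tracer.requestSpan(TracingIdentifiers.SPAN_DISPATCH, parentSpan);
    // Besides ATTR_SYSTEM = ATTR_SYSTEM_COUCHBASE, the decorator now stamps
    // TracingIdentifiers.ATTR_CLUSTER_NAME and ATTR_CLUSTER_UUID on the span
    // whenever its ClusterIdentifier supplier returns a non-null value.
    span.end();
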
throw unsupported(); } diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryOps.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryOps.java index 127b37ddf..d2072520d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryOps.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryOps.java @@ -39,11 +39,11 @@ import com.couchbase.client.core.logging.RedactableArgument; import com.couchbase.client.core.msg.kv.MutationToken; import com.couchbase.client.core.msg.query.QueryChunkRow; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.protostellar.CoreProtostellarAccessorsStreaming; import com.couchbase.client.core.protostellar.CoreProtostellarErrorHandlingUtil; import com.couchbase.client.core.protostellar.ProtostellarRequest; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.ProtostellarUtil; import com.couchbase.client.protostellar.query.v1.QueryRequest; import com.couchbase.client.protostellar.query.v1.QueryResponse; diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryResult.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryResult.java index 1ae97b549..d91388eeb 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreQueryResult.java @@ -19,7 +19,7 @@ import com.couchbase.client.core.api.query.CoreQueryMetaData; import com.couchbase.client.core.api.query.CoreQueryResult; import com.couchbase.client.core.msg.query.QueryChunkRow; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.protostellar.query.v1.QueryResponse; import java.util.List; diff --git a/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreReactiveQueryResult.java b/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreReactiveQueryResult.java index 47126cac2..6986b678d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreReactiveQueryResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/protostellar/query/ProtostellarCoreReactiveQueryResult.java @@ -19,7 +19,7 @@ import com.couchbase.client.core.api.query.CoreQueryMetaData; import com.couchbase.client.core.api.query.CoreReactiveQueryResult; import com.couchbase.client.core.msg.query.QueryChunkRow; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; diff --git a/core-io/src/main/java/com/couchbase/client/core/service/Service.java b/core-io/src/main/java/com/couchbase/client/core/service/Service.java index 3db3998ed..6e4b9dc0c 100644 --- a/core-io/src/main/java/com/couchbase/client/core/service/Service.java +++ b/core-io/src/main/java/com/couchbase/client/core/service/Service.java @@ -84,10 +84,7 @@ public interface Service extends Stateful { * Returns the remote address for this service. 
*/ default HostAndPort address() { - return new HostAndPort( - context().remoteHostname(), - context().remotePort() - ); + return context().remote(); } @Stability.Internal diff --git a/core-io/src/main/java/com/couchbase/client/core/service/ServiceContext.java b/core-io/src/main/java/com/couchbase/client/core/service/ServiceContext.java index b3a05ee5c..c86e186f5 100644 --- a/core-io/src/main/java/com/couchbase/client/core/service/ServiceContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/service/ServiceContext.java @@ -17,23 +17,14 @@ package com.couchbase.client.core.service; import com.couchbase.client.core.CoreContext; +import com.couchbase.client.core.util.HostAndPort; import java.util.Map; import java.util.Optional; -import static com.couchbase.client.core.logging.RedactableArgument.redactSystem; - public class ServiceContext extends CoreContext { - /** - * The hostname of this service. - */ - private final String remoteHostname; - - /** - * The port of this service. - */ - private final int remotePort; + private final HostAndPort remote; /** * The service type of this context. @@ -45,24 +36,35 @@ public class ServiceContext extends CoreContext { public ServiceContext(CoreContext ctx, String remoteHostname, int remotePort, ServiceType serviceType, Optional bucket) { super(ctx.core(), ctx.id(), ctx.environment(), ctx.authenticator()); - this.remoteHostname = remoteHostname; - this.remotePort = remotePort; this.bucket = bucket; this.serviceType = serviceType; + this.remote = new HostAndPort(remoteHostname, remotePort); } + /** + * @deprecated In favor of {@link #remote()} + */ + @Deprecated public String remoteHostname() { - return remoteHostname; + return remote.host(); } + /** + * @deprecated In favor of {@link #remote()} + */ + @Deprecated public int remotePort() { - return remotePort; + return remote.port(); + } + + public HostAndPort remote() { + return remote; } @Override public void injectExportableParams(final Map input) { super.injectExportableParams(input); - input.put("remote", redactSystem(remoteHostname() + ":" + remotePort())); + input.put("remote", remote.toString()); input.put("type", serviceType); bucket.ifPresent(b -> input.put("bucket", b)); } diff --git a/core-io/src/main/java/com/couchbase/client/core/service/kv/NodeIndexCalculator.java b/core-io/src/main/java/com/couchbase/client/core/service/kv/NodeIndexCalculator.java new file mode 100644 index 000000000..e381c57d1 --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/service/kv/NodeIndexCalculator.java @@ -0,0 +1,84 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
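Related to the ServiceContext change above: the remote endpoint is now carried as a single HostAndPort, and the per-field accessors are deprecated. The replacement call pattern, in brief (serviceContext is a placeholder):

    HostAndPort remote = serviceContext.remote();
    String host = remote.host(); // was serviceContext.remoteHostname()
    int port = remote.port();    // was serviceContext.remotePort()
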
+ */ +package com.couchbase.client.core.service.kv; + +import com.couchbase.client.core.CoreContext; +import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreReadPreference; +import com.couchbase.client.core.config.CouchbaseBucketConfig; +import com.couchbase.client.core.config.PortInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.charset.StandardCharsets; + +import static com.couchbase.client.core.node.KeyValueLocator.partitionForKey; + +@Stability.Internal +public class NodeIndexCalculator { + private final boolean[] allowedNodeIndexes; + private final CouchbaseBucketConfig topology; + private final static Logger logger = LoggerFactory.getLogger(NodeIndexCalculator.class); + + public NodeIndexCalculator(CoreReadPreference readPreference, CouchbaseBucketConfig topology, CoreContext coreContext) { + boolean[] allowedNodeIndexesOut = new boolean[topology.nodes().size()]; + + for (int nodeIndex = 0; nodeIndex < topology.portInfos().size(); nodeIndex++) { + boolean canUseNode = true; + PortInfo node = topology.portInfos().get(nodeIndex); + + if (readPreference == CoreReadPreference.PREFERRED_SERVER_GROUP) { + canUseNode = node.serverGroup() != null && node.serverGroup().equals(coreContext.environment().preferredServerGroup()); + } + + allowedNodeIndexesOut[nodeIndex] = canUseNode; + } + + this.allowedNodeIndexes = allowedNodeIndexesOut; + this.topology = topology; + } + + public boolean canUseNode(String documentId, int replicaIndex, boolean isActive) { + boolean useFastForward = false; + int partitionId = partitionForKey(documentId.getBytes(StandardCharsets.UTF_8), topology.numberOfPartitions()); + int nodeIndex; + + if (isActive) { + nodeIndex = topology.nodeIndexForActive(partitionId, useFastForward); + } else { + nodeIndex = topology.nodeIndexForReplica(partitionId, replicaIndex, useFastForward); + } + + logger.trace("Checking whether doc can use node. 
doc={} isActive={} replica={} pid={} ni={} allowed={}", documentId, isActive, replicaIndex, partitionId, nodeIndex, allowedNodeIndexes); + + return check(nodeIndex); + } + + public boolean canUseNodeForActive(String documentId) { + return canUseNode(documentId, 0, true); + } + + public boolean canUseNodeForReplica(String documentId, int replicaIndex) { + return canUseNode(documentId, replicaIndex, false); + } + + public boolean check(int nodeIndex) { + if (nodeIndex < 0 || nodeIndex > allowedNodeIndexes.length) { + return false; + } + return allowedNodeIndexes[nodeIndex]; + } +} diff --git a/core-io/src/main/java/com/couchbase/client/core/service/kv/Observe.java b/core-io/src/main/java/com/couchbase/client/core/service/kv/Observe.java index ab84aedc9..ff6630090 100644 --- a/core-io/src/main/java/com/couchbase/client/core/service/kv/Observe.java +++ b/core-io/src/main/java/com/couchbase/client/core/service/kv/Observe.java @@ -54,7 +54,7 @@ public static Mono poll(final ObserveContext ctx) { } final RequestSpan parentSpan = ctx - .environment() + .coreResources() .requestTracer() .requestSpan("observe", ctx.parentSpan()); @@ -79,7 +79,7 @@ private static Flux viaMutationToken(final int bucketReplicas, fina List requests = new ArrayList<>(); if (ctx.persistTo() != ObservePersistTo.NONE) { - final RequestSpan span = ctx.environment().requestTracer() + final RequestSpan span = ctx.coreResources().requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_OBSERVE, parent); requests.add(new ObserveViaSeqnoRequest(timeout, ctx, ctx.collectionIdentifier(), retryStrategy, 0, true, mutationToken.partitionUUID(), id, span)); @@ -87,7 +87,7 @@ private static Flux viaMutationToken(final int bucketReplicas, fina if (ctx.persistTo().touchesReplica() || ctx.replicateTo().touchesReplica()) { for (short i = 1; i <= bucketReplicas; i++) { - final RequestSpan span = ctx.environment().requestTracer() + final RequestSpan span = ctx.coreResources().requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_OBSERVE, parent); requests.add(new ObserveViaSeqnoRequest(timeout, ctx, ctx.collectionIdentifier(), retryStrategy, i, false, mutationToken.partitionUUID(), id, span)); diff --git a/core-io/src/main/java/com/couchbase/client/core/service/kv/ReplicaHelper.java b/core-io/src/main/java/com/couchbase/client/core/service/kv/ReplicaHelper.java index 7ab7f68af..c0e546568 100644 --- a/core-io/src/main/java/com/couchbase/client/core/service/kv/ReplicaHelper.java +++ b/core-io/src/main/java/com/couchbase/client/core/service/kv/ReplicaHelper.java @@ -24,6 +24,7 @@ import com.couchbase.client.core.api.kv.CoreKvResponseMetadata; import com.couchbase.client.core.api.kv.CoreSubdocGetCommand; import com.couchbase.client.core.api.kv.CoreSubdocGetResult; +import com.couchbase.client.core.api.kv.CoreReadPreference; import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.TracingIdentifiers; import com.couchbase.client.core.cnc.events.request.IndividualReplicaGetFailedEvent; @@ -106,21 +107,21 @@ public static Flux getAllReplicasReactive( final Duration timeout, final RetryStrategy retryStrategy, Map clientContext, - RequestSpan parentSpan + RequestSpan parentSpan, + CoreReadPreference readPreference ) { notNullOrEmpty(documentId, "Id", () -> ReducedKeyValueErrorContext.create(documentId, collectionIdentifier)); - CoreEnvironment env = core.context().environment(); - RequestSpan getAllSpan = env.requestTracer().requestSpan(TracingIdentifiers.SPAN_GET_ALL_REPLICAS, parentSpan); + RequestSpan 
getAllSpan = core.context().coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_GET_ALL_REPLICAS, parentSpan); return Reactor - .toMono(() -> getAllReplicasRequests(core, collectionIdentifier, documentId, clientContext, retryStrategy, timeout, getAllSpan)) + .toMono(() -> getAllReplicasRequests(core, collectionIdentifier, documentId, clientContext, retryStrategy, timeout, getAllSpan, readPreference)) .flux() .flatMap(Flux::fromStream) .flatMap(request -> Reactor .wrap(request, get(core, request), true) .onErrorResume(t -> { - env.eventBus().publish(new IndividualReplicaGetFailedEvent(request.context())); + core.environment().eventBus().publish(new IndividualReplicaGetFailedEvent(request.context())); return Mono.empty(); // Swallow any errors from individual replicas }) .map(response -> new GetReplicaResponse(response, request instanceof ReplicaGetRequest)) @@ -147,14 +148,15 @@ public static Flux lookupInAllReplicasReactive( final Duration timeout, final RetryStrategy retryStrategy, Map clientContext, - RequestSpan parentSpan + RequestSpan parentSpan, + CoreReadPreference readPreference ) { notNullOrEmpty(documentId, "Id", () -> ReducedKeyValueErrorContext.create(documentId, collectionIdentifier)); CoreEnvironment env = core.context().environment(); - RequestSpan getAllSpan = env.requestTracer().requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ALL_REPLICAS, parentSpan); + RequestSpan getAllSpan = core.context().coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ALL_REPLICAS, parentSpan); return Reactor - .toMono(() -> lookupInAllReplicasRequests(core, collectionIdentifier, documentId, commands, clientContext, retryStrategy, timeout, getAllSpan)) + .toMono(() -> lookupInAllReplicasRequests(core, collectionIdentifier, documentId, commands, clientContext, retryStrategy, timeout, getAllSpan, readPreference)) .flux() .flatMap(Flux::fromStream) .flatMap(request -> Reactor @@ -184,12 +186,12 @@ public static CompletableFuture>> getAllReplicasAs final RetryStrategy retryStrategy, final Map clientContext, final RequestSpan parentSpan, + final CoreReadPreference readPreference, final Function responseMapper ) { - CoreEnvironment env = core.context().environment(); - RequestSpan getAllSpan = env.requestTracer().requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ALL_REPLICAS, parentSpan); + RequestSpan getAllSpan = core.context().coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ALL_REPLICAS, parentSpan); - return getAllReplicasRequests(core, collectionIdentifier, documentId, clientContext, retryStrategy, timeout, getAllSpan) + return getAllReplicasRequests(core, collectionIdentifier, documentId, clientContext, retryStrategy, timeout, getAllSpan, readPreference) .thenApply(stream -> stream.map(request -> get(core, request) @@ -232,12 +234,12 @@ public static CompletableFuture>> lookupInAllRepli final RetryStrategy retryStrategy, final Map clientContext, final RequestSpan parentSpan, + final CoreReadPreference readPreference, final Function responseMapper ) { - CoreEnvironment env = core.context().environment(); - RequestSpan getAllSpan = env.requestTracer().requestSpan(TracingIdentifiers.SPAN_GET_ALL_REPLICAS, parentSpan); + RequestSpan getAllSpan = core.context().coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_GET_ALL_REPLICAS, parentSpan); - return lookupInAllReplicasRequests(core, collectionIdentifier, documentId, commands, clientContext, retryStrategy, timeout, getAllSpan) + return lookupInAllReplicasRequests(core, 
collectionIdentifier, documentId, commands, clientContext, retryStrategy, timeout, getAllSpan, readPreference) .thenApply(stream -> stream.map(request -> get(core, request) @@ -269,13 +271,14 @@ public static CompletableFuture getAnyReplicaAsync( final RetryStrategy retryStrategy, final Map clientContext, final RequestSpan parentSpan, + final CoreReadPreference readPreference, final Function responseMapper) { - RequestSpan getAnySpan = core.context().environment().requestTracer() + RequestSpan getAnySpan = core.context().coreResources().requestTracer() .requestSpan(TracingIdentifiers.SPAN_GET_ANY_REPLICA, parentSpan); CompletableFuture>> listOfFutures = getAllReplicasAsync( - core, collectionIdentifier, documentId, timeout, retryStrategy, clientContext, getAnySpan, responseMapper + core, collectionIdentifier, documentId, timeout, retryStrategy, clientContext, getAnySpan, readPreference, responseMapper ); // Aggregating the futures here will discard the individual errors, which we don't need @@ -304,13 +307,14 @@ public static CompletableFuture lookupInAnyReplicaAsync( final RetryStrategy retryStrategy, final Map clientContext, final RequestSpan parentSpan, + final CoreReadPreference readPreference, final Function responseMapper) { - RequestSpan getAnySpan = core.context().environment().requestTracer() + RequestSpan getAnySpan = core.context().coreResources().requestTracer() .requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ANY_REPLICA, parentSpan); CompletableFuture>> listOfFutures = lookupInAllReplicasAsync( - core, collectionIdentifier, documentId, commands, timeout, retryStrategy, clientContext, getAnySpan, responseMapper + core, collectionIdentifier, documentId, commands, timeout, retryStrategy, clientContext, getAnySpan, readPreference, responseMapper ); // Aggregating the futures here will discard the individual errors, which we don't need @@ -372,7 +376,8 @@ public static CompletableFuture> getAllReplicasRequests( final Map clientContext, final RetryStrategy retryStrategy, final Duration timeout, - final RequestSpan parent + final RequestSpan parent, + final CoreReadPreference readPreference ) { notNullOrEmpty(documentId, "Id"); @@ -381,21 +386,32 @@ public static CompletableFuture> getAllReplicasRequests( final BucketConfig config = core.clusterConfig().bucketConfig(collectionIdentifier.bucket()); if (config instanceof CouchbaseBucketConfig) { - int numReplicas = ((CouchbaseBucketConfig) config).numberOfReplicas(); + CouchbaseBucketConfig topology = (CouchbaseBucketConfig) config; + int numReplicas = topology.numberOfReplicas(); List requests = new ArrayList<>(numReplicas + 1); + NodeIndexCalculator allowedNodeIndexes = new NodeIndexCalculator(readPreference, topology, coreContext); - RequestSpan span = environment.requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_GET, parent); - GetRequest activeRequest = new GetRequest(documentId, timeout, coreContext, collectionIdentifier, retryStrategy, span); - activeRequest.context().clientContext(clientContext); - requests.add(activeRequest); + if (allowedNodeIndexes.canUseNodeForActive(documentId)) { + RequestSpan span = coreContext.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_GET, parent); + GetRequest activeRequest = new GetRequest(documentId, timeout, coreContext, collectionIdentifier, retryStrategy, span); + activeRequest.context().clientContext(clientContext); + requests.add(activeRequest); + } for (short replica = 1; replica <= numReplicas; replica++) { - RequestSpan replicaSpan = 
environment.requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_GET_REPLICA, parent); - ReplicaGetRequest replicaRequest = new ReplicaGetRequest( + if (allowedNodeIndexes.canUseNodeForReplica(documentId, replica - 1)) { + RequestSpan replicaSpan = coreContext.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_GET_REPLICA, parent); + ReplicaGetRequest replicaRequest = new ReplicaGetRequest( documentId, timeout, coreContext, collectionIdentifier, retryStrategy, replica, replicaSpan - ); - replicaRequest.context().clientContext(clientContext); - requests.add(replicaRequest); + ); + replicaRequest.context().clientContext(clientContext); + requests.add(replicaRequest); + } + } + if (requests.isEmpty()) { + CompletableFuture> future = new CompletableFuture<>(); + future.completeExceptionally(DocumentUnretrievableException.noReplicasSuitable()); + return future; } return CompletableFuture.completedFuture(requests.stream()); } else if (config == null) { @@ -404,7 +420,7 @@ public static CompletableFuture> getAllReplicasRequests( final Duration retryDelay = Duration.ofMillis(100); final CompletableFuture> future = new CompletableFuture<>(); coreContext.environment().timer().schedule(() -> { - getAllReplicasRequests(core, collectionIdentifier, documentId, clientContext, retryStrategy, timeout.minus(retryDelay), parent).whenComplete((getRequestStream, throwable) -> { + getAllReplicasRequests(core, collectionIdentifier, documentId, clientContext, retryStrategy, timeout.minus(retryDelay), parent, readPreference).whenComplete((getRequestStream, throwable) -> { if (throwable != null) { future.completeExceptionally(throwable); } else { @@ -441,7 +457,8 @@ public static CompletableFuture> lookupInAllReplicasReq final Map clientContext, final RetryStrategy retryStrategy, final Duration timeout, - final RequestSpan parent + final RequestSpan parent, + final CoreReadPreference readPreference ) { notNullOrEmpty(documentId, "Id"); @@ -450,26 +467,37 @@ public static CompletableFuture> lookupInAllReplicasReq final BucketConfig config = core.clusterConfig().bucketConfig(collectionIdentifier.bucket()); if (config instanceof CouchbaseBucketConfig) { + CouchbaseBucketConfig topology = (CouchbaseBucketConfig) config; if (!config.bucketCapabilities().contains(BucketCapabilities.SUBDOC_READ_REPLICA)) { return failedFuture(FeatureNotAvailableException.subdocReadReplica()); } - int numReplicas = ((CouchbaseBucketConfig) config).numberOfReplicas(); + int numReplicas = topology.numberOfReplicas(); List requests = new ArrayList<>(numReplicas + 1); + NodeIndexCalculator allowedNodeIndexes = new NodeIndexCalculator(readPreference, topology, coreContext); - RequestSpan span = environment.requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_LOOKUP_IN, parent); - SubdocGetRequest activeRequest = SubdocGetRequest.create(timeout, coreContext, collectionIdentifier, retryStrategy, documentId, (byte)0, commands, span); - activeRequest.context().clientContext(clientContext); - requests.add(activeRequest); + if (allowedNodeIndexes.canUseNodeForActive(documentId)) { + RequestSpan span = coreContext.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_REQUEST_KV_LOOKUP_IN, parent); + SubdocGetRequest activeRequest = SubdocGetRequest.create(timeout, coreContext, collectionIdentifier, retryStrategy, documentId, (byte) 0, commands, span); + activeRequest.context().clientContext(clientContext); + requests.add(activeRequest); + } for (short replica = 1; replica <= numReplicas; 
replica++) { - RequestSpan replicaSpan = environment.requestTracer().requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ALL_REPLICAS, parent); - ReplicaSubdocGetRequest replicaRequest = ReplicaSubdocGetRequest.create( - timeout, coreContext, collectionIdentifier, retryStrategy, documentId, (byte)0, commands, replica, replicaSpan - ); - replicaRequest.context().clientContext(clientContext); - requests.add(replicaRequest); + if (allowedNodeIndexes.canUseNodeForReplica(documentId, replica - 1)) { + RequestSpan replicaSpan = coreContext.coreResources().requestTracer().requestSpan(TracingIdentifiers.SPAN_LOOKUP_IN_ALL_REPLICAS, parent); + ReplicaSubdocGetRequest replicaRequest = ReplicaSubdocGetRequest.create( + timeout, coreContext, collectionIdentifier, retryStrategy, documentId, (byte) 0, commands, replica, replicaSpan + ); + replicaRequest.context().clientContext(clientContext); + requests.add(replicaRequest); + } + } + if (requests.isEmpty()) { + CompletableFuture> future = new CompletableFuture<>(); + future.completeExceptionally(DocumentUnretrievableException.noReplicasSuitable()); + return future; } return CompletableFuture.completedFuture(requests.stream()); } else if (config == null) { @@ -478,7 +506,7 @@ public static CompletableFuture> lookupInAllReplicasReq final Duration retryDelay = Duration.ofMillis(100); final CompletableFuture> future = new CompletableFuture<>(); coreContext.environment().timer().schedule(() -> { - lookupInAllReplicasRequests(core, collectionIdentifier, documentId, commands, clientContext, retryStrategy, timeout.minus(retryDelay), parent).whenComplete((getRequestStream, throwable) -> { + lookupInAllReplicasRequests(core, collectionIdentifier, documentId, commands, clientContext, retryStrategy, timeout.minus(retryDelay), parent, readPreference).whenComplete((getRequestStream, throwable) -> { if (throwable != null) { future.completeExceptionally(throwable); } else { diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterIdentifier.java b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterIdentifier.java new file mode 100644 index 000000000..3f8c72eeb --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterIdentifier.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.couchbase.client.core.topology; + +import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.JsonNode; +import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.node.ObjectNode; +import reactor.util.annotation.Nullable; + +import static com.couchbase.client.core.logging.RedactableArgument.redactMeta; + +@Stability.Internal +public class ClusterIdentifier { + private final String clusterUuid; + private final String clusterName; + + ClusterIdentifier(String clusterUuid, String clusterName) { + this.clusterUuid = clusterUuid; + this.clusterName = clusterName; + } + + public static @Nullable ClusterIdentifier parse(ObjectNode config) { + JsonNode clusterUuid = config.path("clusterUUID"); + JsonNode clusterName = config.path("clusterName"); + if (clusterUuid.isMissingNode() || clusterName.isMissingNode()) { + return null; + } + return new ClusterIdentifier(clusterUuid.asText(), clusterName.asText()); + } + + public String clusterUuid() { + return clusterUuid; + } + + public String clusterName() { + return clusterName; + } + + @Override + public String toString() { + return "ClusterIdent{" + + "clusterUuid='" + clusterUuid + '\'' + + ", clusterName='" + redactMeta(clusterName) + '\'' + + '}'; + } +} diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterIdentifierUtil.java b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterIdentifierUtil.java new file mode 100644 index 000000000..033c7ade1 --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterIdentifierUtil.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.couchbase.client.core.topology; + +import com.couchbase.client.core.config.ClusterConfig; +import reactor.util.annotation.Nullable; + +public class ClusterIdentifierUtil { + private ClusterIdentifierUtil() {} + + public static @Nullable ClusterIdentifier fromConfig(@Nullable ClusterConfig config) { + return config == null ? null : config.globalConfig() == null ? 
null : config.globalConfig().clusterIdent(); + } +} diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopology.java b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopology.java index eda14a390..3858c9024 100644 --- a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopology.java +++ b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopology.java @@ -36,9 +36,11 @@ public class ClusterTopology { private final NetworkResolution network; private final Set capabilities; private final List nodes; + @Nullable private final ClusterIdentifier clusterIdent; public static ClusterTopology of( TopologyRevision revision, + @Nullable ClusterIdentifier clusterIdent, List nodes, Set capabilities, NetworkResolution network, @@ -52,7 +54,8 @@ public static ClusterTopology of( capabilities, network, portSelector, - bucket + bucket, + clusterIdent ); } @@ -61,7 +64,8 @@ public static ClusterTopology of( nodes, capabilities, network, - portSelector + portSelector, + clusterIdent ); } @@ -70,7 +74,8 @@ protected ClusterTopology( List nodes, Set capabilities, NetworkResolution network, - PortSelector portSelector + PortSelector portSelector, + @Nullable ClusterIdentifier clusterIdent ) { if (network.equals(NetworkResolution.AUTO)) { throw new IllegalArgumentException("Must resolve 'auto' network before creating config."); @@ -81,6 +86,7 @@ protected ClusterTopology( this.capabilities = unmodifiableSet(newEnumSet(ClusterCapability.class, capabilities)); this.network = requireNonNull(network); this.tls = requireNonNull(portSelector) == PortSelector.TLS; + this.clusterIdent = clusterIdent; } public TopologyRevision revision() { @@ -114,12 +120,17 @@ public ClusterTopologyWithBucket requireBucket() { throw new NoSuchElementException("Bucket topology is absent."); } + @Nullable public ClusterIdentifier id() { + return clusterIdent; + } + @Override public String toString() { String bucket = this instanceof ClusterTopologyWithBucket ? 
this.requireBucket().bucket().toString() : ""; return "ClusterTopology{" + "revision=" + revision + + ", clusterIdent=" + clusterIdent + ", tls=" + tls + ", network=" + network + ", capabilities=" + capabilities + diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyParser.java b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyParser.java index 78d20c455..a37194abe 100644 --- a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyParser.java +++ b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyParser.java @@ -123,8 +123,11 @@ resolvedNetwork, redactSystem(it) BucketTopology bucket = BucketTopology.parse(clusterConfig, nodesReadyToServiceThisBucket, memcachedHashingStrategy); + ClusterIdentifier clusterIdent = ClusterIdentifier.parse(clusterConfig); + return ClusterTopology.of( TopologyRevision.parse(clusterConfig), + clusterIdent, resolvedNodes, parseCapabilities(clusterConfig), resolvedNetwork, diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyWithBucket.java b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyWithBucket.java index 53afd70cc..2c56aefc9 100644 --- a/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyWithBucket.java +++ b/core-io/src/main/java/com/couchbase/client/core/topology/ClusterTopologyWithBucket.java @@ -18,6 +18,7 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.env.NetworkResolution; +import reactor.util.annotation.Nullable; import java.util.List; import java.util.Set; @@ -40,9 +41,10 @@ public class ClusterTopologyWithBucket extends ClusterTopology { Set capabilities, NetworkResolution network, PortSelector portSelector, - BucketTopology bucket + BucketTopology bucket, + @Nullable ClusterIdentifier clusterIdent ) { - super(revision, nodes, capabilities, network, portSelector); + super(revision, nodes, capabilities, network, portSelector, clusterIdent); this.bucket = requireNonNull(bucket); } diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePorts.java b/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePorts.java index 5812197a6..7229d0fb0 100644 --- a/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePorts.java +++ b/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePorts.java @@ -50,6 +50,7 @@ public class HostAndServicePorts implements KetamaRingNode { "", emptyMap(), new NodeIdentifier("", 0, ""), + null, null ); @@ -57,17 +58,20 @@ public class HostAndServicePorts implements KetamaRingNode { private final Map ports; private final NodeIdentifier id; @Nullable private final HostAndPort ketamaAuthority; + @Nullable private final String serverGroup; public HostAndServicePorts( String host, Map ports, NodeIdentifier id, - @Nullable HostAndPort ketamaAuthority + @Nullable HostAndPort ketamaAuthority, + @Nullable String serverGroup ) { this.host = requireNonNull(host); this.ports = unmodifiableMap(newEnumMap(ServiceType.class, ports)); this.id = requireNonNull(id); this.ketamaAuthority = ketamaAuthority; + this.serverGroup = serverGroup; } public boolean inaccessible() { @@ -105,6 +109,10 @@ public Map ports() { return ports; } + public @Nullable String serverGroup() { + return serverGroup; + } + public boolean has(ServiceType serviceType) { return ports.containsKey(serviceType); } @@ -121,7 +129,7 @@ public HostAndServicePorts without(ServiceType 
service, ServiceType... moreServi temp.remove(t); } - return new HostAndServicePorts(this.host, temp, this.id, this.ketamaAuthority); + return new HostAndServicePorts(this.host, temp, this.id, this.ketamaAuthority, this.serverGroup); } @Stability.Internal @@ -129,7 +137,7 @@ public HostAndServicePorts withKetamaAuthority(@Nullable HostAndPort ketamaAutho if (Objects.equals(this.ketamaAuthority, ketamaAuthority)) { return this; } - return new HostAndServicePorts(this.host, this.ports, this.id, ketamaAuthority); + return new HostAndServicePorts(this.host, this.ports, this.id, ketamaAuthority, this.serverGroup); } boolean matches(SeedNode seedNode) { diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePortsParser.java b/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePortsParser.java index 219d9c658..6b03b7007 100644 --- a/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePortsParser.java +++ b/core-io/src/main/java/com/couchbase/client/core/topology/HostAndServicePortsParser.java @@ -54,13 +54,16 @@ public static Map parse( ) { Map raw = parseIntermediate(json); HostAndPort ketamaAuthority = getKetamaAuthority(raw); + String serverGroup = json.path("serverGroup").asText(); + final String serverGroupFinal = serverGroup.isEmpty() ? null : serverGroup; return transformValues(raw, value -> new HostAndServicePorts( value.host, portSelector.selectPorts(value.rawServicePorts), getId(value.host, raw), - ketamaAuthority + ketamaAuthority, + serverGroupFinal ) ); } diff --git a/core-io/src/main/java/com/couchbase/client/core/topology/NodeIdentifier.java b/core-io/src/main/java/com/couchbase/client/core/topology/NodeIdentifier.java index 403d839b7..e61720360 100644 --- a/core-io/src/main/java/com/couchbase/client/core/topology/NodeIdentifier.java +++ b/core-io/src/main/java/com/couchbase/client/core/topology/NodeIdentifier.java @@ -19,8 +19,6 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.util.HostAndPort; -import java.util.Objects; - import static java.util.Objects.requireNonNull; @Stability.Internal @@ -28,8 +26,12 @@ public class NodeIdentifier { private final HostAndPort canonical; // manager host:port on default network private final String hostForNetworkConnections; - public NodeIdentifier(String host, int port, String hostForNetworkConnections) { - this(new HostAndPort(host, port), hostForNetworkConnections); + public NodeIdentifier( + String canonicalHost, + int canonicalPort, + String hostForNetworkConnections + ) { + this(new HostAndPort(canonicalHost, canonicalPort), hostForNetworkConnections); } public NodeIdentifier(HostAndPort canonical, String hostForNetworkConnections) { @@ -37,6 +39,13 @@ public NodeIdentifier(HostAndPort canonical, String hostForNetworkConnections) { this.hostForNetworkConnections = requireNonNull(hostForNetworkConnections); } + public static NodeIdentifier forBootstrap(String bootstrapHost, int bootstrapPort) { + // This address isn't really "canonical", since it may be an "external" address. + // If it's an external address, the node created from this identifier will be discarded + // when the config with the _real_ canonical addresses is applied. 
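An aside on the serverGroup field parsed above: HostAndServicePortsParser now lifts the optional "serverGroup" attribute out of each nodesExt entry and normalises an absent or empty value to null, which is why HostAndServicePorts.serverGroup() is @Nullable. The standalone sketch below only illustrates that normalisation; it uses plain Jackson rather than the SDK's shaded copy, and the JSON snippets are invented for the example.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ServerGroupParseSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    // One nodesExt-style entry that advertises a server group, one that does not.
    JsonNode withGroup = mapper.readTree("{\"serverGroup\":\"Group 1\",\"hostname\":\"node1\"}");
    JsonNode withoutGroup = mapper.readTree("{\"hostname\":\"node2\"}");

    // path() returns a missing node rather than null, so asText() yields "".
    System.out.println(normalize(withGroup.path("serverGroup").asText()));    // Group 1
    System.out.println(normalize(withoutGroup.path("serverGroup").asText())); // null
  }

  // Mirrors the empty-string-to-null normalisation applied before constructing HostAndServicePorts.
  private static String normalize(String serverGroup) {
    return serverGroup.isEmpty() ? null : serverGroup;
  }
}

This group label is what the read-preference filtering elsewhere in this change compares against when deciding whether a given active or replica copy may be used.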
+ return new NodeIdentifier(new HostAndPort(bootstrapHost, bootstrapPort), bootstrapHost); + } + @Deprecated public com.couchbase.client.core.node.NodeIdentifier toLegacy() { return new com.couchbase.client.core.node.NodeIdentifier(canonical, hostForNetworkConnections); @@ -56,12 +65,12 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(canonical); + return canonical.hashCode(); } @Override public String toString() { - return "NodeID{" + + return "NodeIdentifier{" + "canonical=" + canonical + ", hostForNetworkConnections='" + hostForNetworkConnections + '\'' + '}'; diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionAttemptContext.java b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionAttemptContext.java index 9bd1b6d45..d213158ba 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionAttemptContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionAttemptContext.java @@ -19,18 +19,22 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.annotation.UsedBy; +import com.couchbase.client.core.api.kv.CoreKvResponseMetadata; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.api.query.CoreQueryContext; import com.couchbase.client.core.api.query.CoreQueryOps; import com.couchbase.client.core.api.query.CoreQueryOptions; import com.couchbase.client.core.api.query.CoreQueryOptionsTransactions; import com.couchbase.client.core.api.query.CoreQueryResult; -import com.couchbase.client.core.classic.query.ClassicCoreQueryResult; import com.couchbase.client.core.api.query.CoreQueryStatus; +import com.couchbase.client.core.classic.query.ClassicCoreQueryResult; import com.couchbase.client.core.classic.query.ClassicCoreReactiveQueryResult; import com.couchbase.client.core.cnc.Event; import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.RequestTracer; import com.couchbase.client.core.cnc.TracingIdentifiers; +import com.couchbase.client.core.cnc.events.transaction.IllegalDocumentStateEvent; +import com.couchbase.client.core.cnc.events.transaction.TransactionLogEvent; import com.couchbase.client.core.deps.com.fasterxml.jackson.core.JsonProcessingException; import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.JsonNode; import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.node.ArrayNode; @@ -42,6 +46,7 @@ import com.couchbase.client.core.error.DecodingFailureException; import com.couchbase.client.core.error.DocumentExistsException; import com.couchbase.client.core.error.DocumentNotFoundException; +import com.couchbase.client.core.error.DocumentUnretrievableException; import com.couchbase.client.core.error.FeatureNotAvailableException; import com.couchbase.client.core.error.context.ReducedKeyValueErrorContext; import com.couchbase.client.core.error.transaction.ActiveTransactionRecordEntryNotFoundException; @@ -74,13 +79,12 @@ import com.couchbase.client.core.msg.kv.InsertResponse; import com.couchbase.client.core.msg.kv.SubdocCommandType; import com.couchbase.client.core.msg.kv.SubdocGetRequest; -import com.couchbase.client.core.msg.kv.SubdocGetResponse; import com.couchbase.client.core.msg.kv.SubdocMutateRequest; import com.couchbase.client.core.msg.kv.SubdocMutateResponse; -import com.couchbase.client.core.node.NodeIdentifier; import 
com.couchbase.client.core.retry.reactor.Jitter; import com.couchbase.client.core.retry.reactor.Retry; import com.couchbase.client.core.retry.reactor.RetryExhaustedException; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.transaction.atr.ActiveTransactionRecordIds; import com.couchbase.client.core.transaction.cleanup.CleanupRequest; import com.couchbase.client.core.transaction.cleanup.CoreTransactionsCleanup; @@ -94,11 +98,11 @@ import com.couchbase.client.core.transaction.components.OperationTypes; import com.couchbase.client.core.transaction.components.TransactionLinks; import com.couchbase.client.core.transaction.config.CoreMergedTransactionConfig; +import com.couchbase.client.core.transaction.error.internal.ErrorClass; import com.couchbase.client.core.transaction.forwards.CoreTransactionsExtension; import com.couchbase.client.core.transaction.forwards.ForwardCompatibility; import com.couchbase.client.core.transaction.forwards.ForwardCompatibilityStage; import com.couchbase.client.core.transaction.log.CoreTransactionLogger; -import com.couchbase.client.core.cnc.events.transaction.TransactionLogEvent; import com.couchbase.client.core.transaction.support.AttemptState; import com.couchbase.client.core.transaction.support.OptionsUtil; import com.couchbase.client.core.transaction.support.SpanWrapper; @@ -110,15 +114,13 @@ import com.couchbase.client.core.transaction.util.DebugUtil; import com.couchbase.client.core.transaction.util.LockTokens; import com.couchbase.client.core.transaction.util.LogDeferThrowable; +import com.couchbase.client.core.transaction.util.MeteringUnits; import com.couchbase.client.core.transaction.util.MonoBridge; import com.couchbase.client.core.transaction.util.QueryUtil; import com.couchbase.client.core.transaction.util.ReactiveLock; import com.couchbase.client.core.transaction.util.ReactiveWaitGroup; import com.couchbase.client.core.transaction.util.TransactionKVHandler; import com.couchbase.client.core.transaction.util.TriFunction; -import com.couchbase.client.core.transaction.error.internal.ErrorClass; -import com.couchbase.client.core.cnc.events.transaction.IllegalDocumentStateEvent; -import com.couchbase.client.core.transaction.util.MeteringUnits; import com.couchbase.client.core.util.BucketConfigUtil; import com.couchbase.client.core.util.CbPreconditions; import reactor.core.publisher.Flux; @@ -134,7 +136,6 @@ import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.Optional; @@ -199,7 +200,7 @@ public TransactionQueryContext(NodeIdentifier queryTarget, @Nullable CoreQueryCo public static final int STATE_BITS_POSITION_FINAL_ERROR = 4; public static final int STATE_BITS_MASK_FINAL_ERROR = 0b1110000; public static final int STATE_BITS_MASK_BITS = 0b0001111; - public static final int UNSTAGING_PARALLELISM = 1000; + public static final int UNSTAGING_PARALLELISM = Integer.parseInt(System.getProperty("com.couchbase.transactions.unstagingParallelism", "1000"));; private final AtomicInteger stateBits = new AtomicInteger(0); @@ -438,7 +439,19 @@ private Mono> getInternal(CollectionIdentifie if (queryModeLocked()) { return getWithQueryLocked(collection, id, lockToken, span); } else { - return getWithKVLocked(collection, id, Optional.empty(), span, lockToken); + return getWithKVLocked(collection, id, Optional.empty(), span, lockToken, false); + } + })); + } + + private Mono> 
getReplicaFromPreferredServerGroupInternal(CollectionIdentifier collection, String id, SpanWrapper pspan) { + + return doKVOperation("get " + DebugUtil.docId(collection, id), pspan, CoreTransactionAttemptContextHooks.HOOK_GET, collection, id, + (operationId, span, lockToken) -> Mono.defer(() -> { + if (queryModeLocked()) { + return Mono.error(new FeatureNotAvailableException("getReplicaFromPreferredServerGroup cannot presently be used in a transaction that has previously involved the query service. It can however be used before any query call.")); + } else { + return getWithKVLocked(collection, id, Optional.empty(), span, lockToken, true); } })); } @@ -447,12 +460,13 @@ private Mono> getWithKVLocked(CollectionIdent String id, Optional resolvingMissingATREntry, SpanWrapper pspan, - ReactiveLock.Waiter lockToken) { + ReactiveLock.Waiter lockToken, + boolean preferredReplicaMode) { return Mono.defer(() -> { assertLocked("getWithKV"); - LOGGER.info(attemptId, "getting doc {}, resolvingMissingATREntry={}", DebugUtil.docId(collection, id), - resolvingMissingATREntry.orElse("")); + LOGGER.info(attemptId, "getting doc {}, resolvingMissingATREntry={}, preferredReplicaMode={}", DebugUtil.docId(collection, id), + resolvingMissingATREntry.orElse(""), preferredReplicaMode); Optional ownWrite = checkForOwnWriteLocked(collection, id); if (ownWrite.isPresent()) { @@ -497,7 +511,8 @@ private Mono> getWithKVLocked(CollectionIdent pspan, resolvingMissingATREntry, units, - overall.supported())) + overall.supported(), + preferredReplicaMode)) .publishOn(scheduler()) @@ -509,7 +524,10 @@ private Mono> getWithKVLocked(CollectionIdent LOGGER.warn(attemptId, "got error while getting doc {}{} in {}us: {}", DebugUtil.docId(collection, id), DebugUtil.dbg(built), pspan.elapsedMicros(), dbg(err)); - if (err instanceof ForwardCompatibilityRequiresRetryException + if (err instanceof DocumentUnretrievableException) { + return Mono.error(err); + } + else if (err instanceof ForwardCompatibilityRequiresRetryException || err instanceof ForwardCompatibilityFailureException) { TransactionOperationFailedException.Builder error = createError() .cause(new ForwardCompatibilityFailureException()); @@ -536,7 +554,8 @@ else if (err instanceof ActiveTransactionRecordNotFoundException || err instance id, Optional.of(attemptIdToCheck), pspan, - newLockToken) + newLockToken, + preferredReplicaMode) .onErrorResume(e -> unlock(newLockToken, "relock error") .then(Mono.error(e)))); @@ -689,6 +708,22 @@ public Mono get(CollectionIdentifier collection, Strin }); } + public Mono getReplicaFromPreferredServerGroup(CollectionIdentifier collection, String id) { + return Mono.defer(() -> { + SpanWrapper span = SpanWrapperUtil.createOp(this, tracer(), collection, id, TracingIdentifiers.TRANSACTION_OP_GET_REPLICA_FROM_PREFERRED_SERVER_GROUP, attemptSpan); + return getReplicaFromPreferredServerGroupInternal(collection, id, span) + .doOnError(err -> span.finishWithErrorStatus()) + .flatMap(doc -> { + span.finish(); + if (doc.isPresent()) { + return Mono.just(doc.get()); + } else { + return Mono.error(new DocumentUnretrievableException(ReducedKeyValueErrorContext.create(id))); + } + }); + }); + } + boolean hasExpiredClientSide(String place, Optional docId) { boolean over = overall.hasExpiredClientSide(); boolean hook = hooks.hasExpiredClientSideHook.apply(this, place, docId); @@ -1462,7 +1497,7 @@ long expiryRemainingMillis() { private RequestTracer tracer() { // Will go to the ThresholdRequestTracer by default. 
In future, may want our own default tracer. - return core.context().environment().requestTracer(); + return core.context().coreResources().requestTracer(); } private byte[] serialize(Object in) { @@ -1871,7 +1906,7 @@ private Mono handleDocExistsDuringStagedInsert(String return hooks.beforeGetDocInExistsDuringStagedInsert.apply(this, id) // testing hook - .then(DocumentGetter.justGetDoc(core, collection, id, kvTimeoutNonMutating(), pspan, true, logger(), units)) + .then(DocumentGetter.justGetDoc(core, collection, id, kvTimeoutNonMutating(), pspan, true, logger(), units, false)) .publishOn(scheduler()) @@ -1894,18 +1929,18 @@ private Mono handleDocExistsDuringStagedInsert(String .flatMap(v -> { if (v.isPresent()) { - Tuple2 results = v.get(); + Tuple2 results = v.get(); CoreTransactionGetResult r = results.getT1(); - SubdocGetResponse lir = results.getT2(); + CoreSubdocGetResult lir = results.getT2(); MeteringUnits built = addUnits(units.build()); LOGGER.info(attemptId, "{} doc {} exists inTransaction={} isDeleted={}{}", - bp, DebugUtil.docId(collection, id), r.links(), lir.isDeleted(), DebugUtil.dbg(built)); + bp, DebugUtil.docId(collection, id), r.links(), lir.tombstone(), DebugUtil.dbg(built)); return forwardCompatibilityCheck(ForwardCompatibilityStage.WRITE_WRITE_CONFLICT_INSERTING_GET, r.links().forwardCompatibility()) .then(Mono.defer(() -> { - if (lir.isDeleted() && !r.links().isDocumentInTransaction()) { + if (lir.tombstone() && !r.links().isDocumentInTransaction()) { LOGGER.info(attemptId, "{} doc {} is a regular tombstone without txn metadata, proceeding to overwrite", bp, DebugUtil.docId(collection, id)); @@ -1969,12 +2004,12 @@ private Mono overwriteStagedInsert(String operationId, SpanWrapper pspan, String bp, CoreTransactionGetResult r, - SubdocGetResponse lir) { + CoreSubdocGetResult lir) { return Mono.defer(() -> { CbPreconditions.check(r.links().isDocumentInTransaction()); CbPreconditions.check(r.links().op().get().equals(OperationTypes.INSERT)); - if (lir.isDeleted()) { + if (lir.tombstone()) { return createStagedInsert(operationId, collection, id, content, flags, pspan, Optional.of(r.cas())); } else { @@ -2641,14 +2676,9 @@ private Mono commitDocsLocked(SpanWrapper span) { long start = System.nanoTime(); return Flux.fromIterable(stagedMutationsLocked) - .parallel(UNSTAGING_PARALLELISM) - .runOn(scheduler()) - - .concatMap(staged -> { - return commitDocWrapperLocked(span, staged); - }) + .publishOn(scheduler()) - .sequential() + .flatMap(staged -> commitDocWrapperLocked(span, staged), UNSTAGING_PARALLELISM) .then(Mono.defer(() -> { long elapsed = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - start); @@ -2839,6 +2869,10 @@ false, false, false, false, false, cas, staged.stagedUserFlags, durabilityLevel( }); } + private void addUnits(CoreKvResponseMetadata meta) { + meteringUnitsBuilder.add(meta); + } + private void addUnits(@Nullable MemcacheProtocol.FlexibleExtras flexibleExtras) { meteringUnitsBuilder.add(flexibleExtras); } @@ -2931,7 +2965,7 @@ private Mono handleDocChangedDuringCommit(SpanWrapper span, } }) .then(hooks.beforeDocChangedDuringCommit.apply(this, id)) // testing hook - .then(DocumentGetter.getAsync(core, LOGGER, staged.collection, config, staged.id, attemptId, true, span, Optional.empty(), units, overall.supported())) + .then(DocumentGetter.getAsync(core, LOGGER, staged.collection, config, staged.id, attemptId, true, span, Optional.empty(), units, overall.supported(), false)) .publishOn(scheduler()) .onErrorResume(err -> { ErrorClass ec = classify(err); 
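Stepping back from the individual hunks: the new getReplicaFromPreferredServerGroup entry point on CoreTransactionAttemptContext passes preferredReplicaMode=true down through getWithKVLocked and TransactionKVHandler.lookupIn, which in turn drives the read-preference filtering added to ReplicaHelper, so only copies of the document hosted in the caller's preferred server group are attempted. If no active or replica copy qualifies, the operation fails with DocumentUnretrievableException rather than silently reading from another group, and per the hunk above it cannot be used once the transaction has already gone through the query service. A minimal usage sketch at the core level, assuming ctx is the CoreTransactionAttemptContext of the current attempt; the bucket and document names are invented, and the fallback to a plain get on failure is just one possible application policy, not SDK behaviour.

// Assumed imports: com.couchbase.client.core.io.CollectionIdentifier,
// com.couchbase.client.core.error.DocumentUnretrievableException, reactor.core.publisher.Mono.
CollectionIdentifier collection = CollectionIdentifier.fromDefault("travel-sample");

Mono<CoreTransactionGetResult> result =
    ctx.getReplicaFromPreferredServerGroup(collection, "airline_10")
        // Thrown when the document has no copy in the preferred server group
        // (or does not exist at all); here we fall back to a regular get.
        .onErrorResume(DocumentUnretrievableException.class,
            err -> ctx.get(collection, "airline_10"));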
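Related to the same file: commitDocsLocked (above) and rollbackDocsLocked (below) drop the ParallelFlux pipeline (.parallel().runOn().concatMap().sequential()) in favour of a single Flux whose concurrency is bounded by the flatMap argument, and UNSTAGING_PARALLELISM itself becomes tunable via the com.couchbase.transactions.unstagingParallelism system property (default 1000). The standalone sketch below only contrasts the two Reactor shapes; the dummy commitOne stands in for commitDocWrapperLocked, and the scheduler choice is illustrative.

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

import java.time.Duration;
import java.util.Arrays;
import java.util.List;

public class UnstagingParallelismSketch {
  public static void main(String[] args) {
    List<String> staged = Arrays.asList("doc-1", "doc-2", "doc-3");
    int parallelism = Integer.parseInt(
        System.getProperty("com.couchbase.transactions.unstagingParallelism", "1000"));

    // Old shape: explicit rails, merged back together at the end.
    Flux.fromIterable(staged)
        .parallel(parallelism)
        .runOn(Schedulers.boundedElastic())
        .concatMap(UnstagingParallelismSketch::commitOne)
        .sequential()
        .blockLast();

    // New shape: one Flux, concurrency capped by the flatMap argument.
    Flux.fromIterable(staged)
        .publishOn(Schedulers.boundedElastic())
        .flatMap(UnstagingParallelismSketch::commitOne, parallelism)
        .blockLast();
  }

  // Stand-in for commitDocWrapperLocked: pretend each unstage takes a moment.
  private static Mono<String> commitOne(String id) {
    return Mono.just(id).delayElement(Duration.ofMillis(10));
  }
}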
@@ -3001,7 +3035,7 @@ private Mono handleDocChangedDuringStaging(SpanWrapper span, throwIfExpired(id, HOOK_STAGING_DOC_CHANGED); }) .then(hooks.beforeDocChangedDuringStaging.apply(this, id)) // testing hook - .then(DocumentGetter.getAsync(core, LOGGER, collection, config, id, attemptId, true, span, Optional.empty(), units, overall.supported())) + .then(DocumentGetter.getAsync(core, LOGGER, collection, config, id, attemptId, true, span, Optional.empty(), units, overall.supported(), false)) .publishOn(scheduler()) .onErrorResume(err -> { MeteringUnits built = addUnits(units.build()); @@ -3089,7 +3123,7 @@ private Mono handleDocChangedDuringRollback(SpanWrapper span, throwIfExpired(id, HOOK_ROLLBACK_DOC_CHANGED); }) .then(hooks.beforeDocChangedDuringRollback.apply(this, id)) // testing hook - .then(DocumentGetter.getAsync(core, LOGGER, collection, config, id, attemptId, true, span, Optional.empty(), units, overall.supported())) + .then(DocumentGetter.getAsync(core, LOGGER, collection, config, id, attemptId, true, span, Optional.empty(), units, overall.supported(), false)) .publishOn(scheduler()) .onErrorResume(err -> { MeteringUnits built = addUnits(units.build()); @@ -3170,6 +3204,7 @@ private Mono atrCommitAmbiguityResolutionLocked(AtomicReference over .then(hooks.beforeAtrCommitAmbiguityResolution.apply(this)) // testing hook .then(TransactionKVHandler.lookupIn(core, atrCollection.get(), atrId.get(), kvTimeoutNonMutating(), false, OptionsUtil.createClientContext("atrCommitAmbiguityResolution"), span, + false, Arrays.asList( new SubdocGetRequest.Command(SubdocCommandType.GET, "attempts." + attemptId + "." + TransactionFields.ATR_FIELD_STATUS, true, 0) ))) @@ -3178,15 +3213,14 @@ private Mono atrCommitAmbiguityResolutionLocked(AtomicReference over .flatMap(result -> { String status = null; try { - status = Mapper.reader().readValue(result.values()[0].value(), String.class); + status = Mapper.reader().readValue(result.field(0).value(), String.class); } catch (IOException e) { - LOGGER.info(attemptId, "failed to parse ATR {} status '{}'", getAtrDebug(atrCollection, atrId), new String(result.values()[0].value())); + LOGGER.info(attemptId, "failed to parse ATR {} status '{}'", getAtrDebug(atrCollection, atrId), new String(result.field(0).value())); status = "UNKNOWN"; } - addUnits(result.flexibleExtras()); - LOGGER.info(attemptId, "got status of ATR {}{}: '{}'", getAtrDebug(atrCollection, atrId), - DebugUtil.dbg(result.flexibleExtras()), status); + addUnits(result.meta()); + LOGGER.info(attemptId, "got status of ATR {}: '{}'", getAtrDebug(atrCollection, atrId), status); AttemptState state = AttemptState.convert(status); @@ -3611,19 +3645,16 @@ private Mono atrRollbackCompleteLocked(boolean isAppRollback, String prefi private Mono rollbackDocsLocked(boolean isAppRollback, SpanWrapper span) { return Mono.defer(() -> { return Flux.fromIterable(stagedMutationsLocked) - .parallel(UNSTAGING_PARALLELISM) - .runOn(scheduler()) + .publishOn(scheduler()) - .concatMap(staged -> { + .flatMap(staged -> { switch (staged.type) { case INSERT: return rollbackStagedInsertLocked(isAppRollback, span, staged.collection, staged.id, staged.cas); default: return rollbackStagedReplaceOrRemoveLocked(isAppRollback, span, staged.collection, staged.id, staged.cas, staged.currentUserFlags); } - }) - - .sequential() + }, UNSTAGING_PARALLELISM) .doOnNext(v -> { LOGGER.info(attemptId, "rollback - docs rolled back"); diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionContext.java 
b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionContext.java index 6326e3158..a1e802153 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionContext.java @@ -57,7 +57,7 @@ public CoreTransactionContext(CoreContext coreContext, CoreTransactionsCleanup cleanup) { this.config = Objects.requireNonNull(config); this.cleanup = Objects.requireNonNull(cleanup); - RequestTracer tracer = coreContext.environment().requestTracer(); + RequestTracer tracer = coreContext.coreResources().requestTracer(); SpanWrapper pspan = config.parentSpan().map(sp -> new SpanWrapper(sp)).orElse(null); this.transactionSpan = SpanWrapper.create(tracer, TracingIdentifiers.TRANSACTION_OP, pspan); SpanWrapperUtil.setAttributes(this.transactionSpan, null, null, null) diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionGetResult.java b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionGetResult.java index 6d20dbba5..79c755768 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionGetResult.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionGetResult.java @@ -17,6 +17,7 @@ package com.couchbase.client.core.transaction; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.JsonNode; import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.ObjectMapper; import com.couchbase.client.core.io.CollectionIdentifier; @@ -237,7 +238,7 @@ public static CoreTransactionGetResult createFrom(CoreTransactionGetResult doc, @Stability.Internal public static CoreTransactionGetResult createFrom(CollectionIdentifier collection, String documentId, - SubdocGetResponse doc) throws IOException { + CoreSubdocGetResult doc) throws IOException { Optional atrId = Optional.empty(); Optional transactionId = Optional.empty(); Optional attemptId = Optional.empty(); @@ -259,16 +260,16 @@ public static CoreTransactionGetResult createFrom(CollectionIdentifier collectio Optional op = Optional.empty(); // "txn.id" - if (doc.values()[0].status().success()) { - JsonNode id = MAPPER.readValue(doc.values()[0].value(), JsonNode.class); + if (doc.field(0).status().success()) { + JsonNode id = MAPPER.readValue(doc.field(0).value(), JsonNode.class); transactionId = Optional.ofNullable(id.path("txn").textValue()); attemptId = Optional.ofNullable(id.path("atmpt").textValue()); operationId = Optional.ofNullable(id.path("op").textValue()); } // "txn.atr" - if (doc.values()[1].status().success()) { - JsonNode atr = MAPPER.readValue(doc.values()[1].value(), JsonNode.class); + if (doc.field(1).status().success()) { + JsonNode atr = MAPPER.readValue(doc.field(1).value(), JsonNode.class); atrId = Optional.ofNullable(atr.path("id").textValue()); atrBucketName = Optional.ofNullable(atr.path("bkt").textValue()); String scope = atr.path("scp").textValue(); @@ -287,24 +288,24 @@ public static CoreTransactionGetResult createFrom(CollectionIdentifier collectio } // "txn.op.type" - if (doc.values()[2].status().success()) { - op = Optional.of(Mapper.reader().readValue(doc.values()[2].value(), String.class)); + if (doc.field(2).status().success()) { + op = Optional.of(Mapper.reader().readValue(doc.field(2).value(), String.class)); } // "txn.op.stgd" - if 
(doc.values()[3].status().success()) { - byte[] raw = doc.values()[3].value(); + if (doc.field(3).status().success()) { + byte[] raw = doc.field(3).value(); stagedContentJson = Optional.of(raw); } // "txn.op.crc32" - if (doc.values()[4].status().success()) { - crc32OfStaging = Optional.of(Mapper.reader().readValue(doc.values()[4].value(), String.class)); + if (doc.field(4).status().success()) { + crc32OfStaging = Optional.of(Mapper.reader().readValue(doc.field(4).value(), String.class)); } // "txn.restore" - if (doc.values()[5].status().success()) { - JsonNode restore = MAPPER.readValue(doc.values()[5].value(), JsonNode.class); + if (doc.field(5).status().success()) { + JsonNode restore = MAPPER.readValue(doc.field(5).value(), JsonNode.class); casPreTxn = Optional.of(restore.path("CAS").textValue()); // Only present in 6.5+ revidPreTxn = Optional.of(restore.path("revid").textValue()); @@ -312,17 +313,17 @@ public static CoreTransactionGetResult createFrom(CollectionIdentifier collectio } // "txn.fc" - if (doc.values()[6].status().success()) { - JsonNode json = MAPPER.readValue(doc.values()[6].value(), JsonNode.class); + if (doc.field(6).status().success()) { + JsonNode json = MAPPER.readValue(doc.field(6).value(), JsonNode.class); ForwardCompatibility fc = new ForwardCompatibility(json); forwardCompatibility = Optional.of(fc); } - if (!doc.values()[7].status().success()) { + if (!doc.field(7).status().success()) { throw new IllegalStateException("$document requested but not received"); } // Read from $document - JsonNode restore = MAPPER.readValue(doc.values()[7].value(), JsonNode.class); + JsonNode restore = MAPPER.readValue(doc.field(7).value(), JsonNode.class); String casFromDocument = restore.path("CAS").textValue(); // Only present in 6.5+ String revidFromDocument = restore.path("revid").textValue(); @@ -333,14 +334,14 @@ public static CoreTransactionGetResult createFrom(CollectionIdentifier collectio int currentUserFlags = restore.path("flags").intValue(); // "txn.op.bin" - if (doc.values()[8].status().success()) { - byte[] raw = doc.values()[8].value(); + if (doc.field(8).status().success()) { + byte[] raw = doc.field(8).value(); stagedContentBinary = Optional.of(raw); } // "txn.aux" - if (doc.values()[9].status().success()) { - JsonNode aux = MAPPER.readValue(doc.values()[9].value(), JsonNode.class); + if (doc.field(9).status().success()) { + JsonNode aux = MAPPER.readValue(doc.field(9).value(), JsonNode.class); if (aux.has("uf")) { stagedUserFlags = Optional.of(aux.get("uf").intValue()); } @@ -349,8 +350,8 @@ public static CoreTransactionGetResult createFrom(CollectionIdentifier collectio byte[] bodyContent; // body - if (doc.values()[10].status().success()) { - bodyContent = doc.values()[10].value(); + if (doc.field(10).status().success()) { + bodyContent = doc.field(10).value(); } else { bodyContent = new byte[] {}; @@ -369,7 +370,7 @@ public static CoreTransactionGetResult createFrom(CollectionIdentifier collectio revidPreTxn, exptimePreTxn, op, - doc.isDeleted(), + doc.tombstone(), crc32OfStaging, forwardCompatibility, operationId, diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionsReactive.java b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionsReactive.java index 69a49cc46..e6bbf90f9 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionsReactive.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/CoreTransactionsReactive.java @@ -28,15 +28,17 @@ import 
com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.TracingIdentifiers; import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.node.TextNode; +import com.couchbase.client.core.error.transaction.RetryTransactionException; +import com.couchbase.client.core.error.transaction.TransactionOperationFailedException; import com.couchbase.client.core.error.transaction.internal.CoreTransactionCommitAmbiguousException; import com.couchbase.client.core.error.transaction.internal.CoreTransactionExpiredException; -import com.couchbase.client.core.error.transaction.TransactionOperationFailedException; +import com.couchbase.client.core.error.transaction.internal.CoreTransactionFailedException; import com.couchbase.client.core.msg.query.QueryChunkRow; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryReason; import com.couchbase.client.core.retry.reactor.DefaultRetry; import com.couchbase.client.core.retry.reactor.Jitter; import com.couchbase.client.core.retry.reactor.RetryContext; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.transaction.config.CoreMergedTransactionConfig; import com.couchbase.client.core.transaction.config.CoreTransactionOptions; import com.couchbase.client.core.transaction.config.CoreTransactionsConfig; @@ -45,8 +47,6 @@ import com.couchbase.client.core.transaction.util.CoreTransactionAttemptContextHooks; import com.couchbase.client.core.transaction.util.DebugUtil; import com.couchbase.client.core.transaction.util.QueryUtil; -import com.couchbase.client.core.error.transaction.RetryTransactionException; -import com.couchbase.client.core.error.transaction.internal.CoreTransactionFailedException; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.util.annotation.Nullable; diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/ClientRecord.java b/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/ClientRecord.java index 6813d7241..b4c7a933f 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/ClientRecord.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/ClientRecord.java @@ -17,6 +17,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.cnc.RequestTracer; import com.couchbase.client.core.cnc.TracingIdentifiers; import com.couchbase.client.core.deps.com.fasterxml.jackson.core.JsonProcessingException; @@ -179,10 +180,10 @@ private Duration nonMutatingTimeout() { return core.context().environment().timeoutConfig().kvTimeout(); } - public static ClientRecordDetails parseClientRecord(SubdocGetResponse clientRecord, String clientUuid) { + public static ClientRecordDetails parseClientRecord(CoreSubdocGetResult clientRecord, String clientUuid) { try { - JsonNode records = Mapper.reader().readValue(clientRecord.values()[0].value(), JsonNode.class); - JsonNode hlcRaw = Mapper.reader().readValue(clientRecord.values()[1].value(), JsonNode.class); + JsonNode records = Mapper.reader().readValue(clientRecord.field(0).value(), JsonNode.class); + JsonNode hlcRaw = Mapper.reader().readValue(clientRecord.field(1).value(), JsonNode.class); ActiveTransactionRecord.ParsedHLC parsedHLC = new ActiveTransactionRecord.ParsedHLC(hlcRaw); JsonNode clients = records.get("clients"); @@ -244,8 +245,9 @@ public 
static ClientRecordDetails parseClientRecord(SubdocGetResponse clientReco } } - public Mono getClientRecord(CollectionIdentifier collection, @Nullable SpanWrapper span) { + public Mono getClientRecord(CollectionIdentifier collection, @Nullable SpanWrapper span) { return TransactionKVHandler.lookupIn(core, collection, CLIENT_RECORD_DOC_ID, nonMutatingTimeout(), false, OptionsUtil.createClientContext("ClientRecord::getClientRecord"), span, + false, Arrays.asList( new SubdocGetRequest.Command(SubdocCommandType.GET, FIELD_RECORDS, true, 0), new SubdocGetRequest.Command(SubdocCommandType.GET, "$vbucket.HLC", true, 1) @@ -253,7 +255,7 @@ public Mono getClientRecord(CollectionIdentifier collection, } private RequestTracer tracer() { - return core.context().environment().requestTracer(); + return core.context().coreResources().requestTracer(); } /* diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/LostCleanupDistributed.java b/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/LostCleanupDistributed.java index 2f78d3488..ddb280e45 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/LostCleanupDistributed.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/LostCleanupDistributed.java @@ -201,7 +201,7 @@ private static List atrsToHandle(int indexOfThisClient, int numActiveCli } private RequestTracer tracer() { - return core.context().environment().requestTracer(); + return core.context().coreResources().requestTracer(); } /** diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/TransactionsCleaner.java b/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/TransactionsCleaner.java index b928e32f6..24410b6df 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/TransactionsCleaner.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/cleanup/TransactionsCleaner.java @@ -17,6 +17,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.cnc.Event; import com.couchbase.client.core.cnc.RequestTracer; import com.couchbase.client.core.cnc.TracingIdentifiers; @@ -152,7 +153,7 @@ private Mono commitDocs(CoreTransactionLogger perEntryLog, return hooks.beforeCommitDoc.apply(doc.id()) // Testing hook .then(Mono.defer(() -> { - if (lir.isDeleted()) { + if (lir.tombstone()) { return TransactionKVHandler.insert(core, collection, doc.id(), content, doc.links().stagedUserFlags().orElse(CodecFlags.JSON_COMMON_FLAGS), kvDurableTimeout(), req.durabilityLevel(), OptionsUtil.createClientContext("Cleaner::commitDocsInsert"), pspan); } else { @@ -163,7 +164,7 @@ private Mono commitDocs(CoreTransactionLogger perEntryLog, ); return TransactionKVHandler.mutateIn(core, collection, doc.id(), kvDurableTimeout(), false, false, false, - lir.isDeleted(), false, doc.cas(), doc.links().stagedUserFlags().orElse(CodecFlags.JSON_COMMON_FLAGS), + lir.tombstone(), false, doc.cas(), doc.links().stagedUserFlags().orElse(CodecFlags.JSON_COMMON_FLAGS), req.durabilityLevel(), OptionsUtil.createClientContext("Cleaner::commitDocs"), pspan, commands); } @@ -192,7 +193,7 @@ private Mono removeTxnLinks(CoreTransactionLogger perEntryLog, .then(TransactionKVHandler.mutateIn(core, collectionIdentifier, doc.id(), kvDurableTimeout(), false, false, false, - lir.isDeleted(), false, doc.cas(), doc.userFlags(), + lir.tombstone(), 
false, doc.cas(), doc.userFlags(), req.durabilityLevel(), OptionsUtil.createClientContext("Cleaner::removeTxnLinks"), pspan, Arrays.asList( new SubdocMutateRequest.Command(SubdocCommandType.DELETE, TransactionFields.TRANSACTION_INTERFACE_PREFIX_ONLY, Bytes.EMPTY_BYTE_ARRAY, false, true, false, 0) ))) @@ -240,7 +241,7 @@ private Mono removeDocs(CoreTransactionLogger perEntryLog, return hooks.beforeRemoveDoc.apply(doc.id()) .then(Mono.defer(() -> { - if (lir.isDeleted()) { + if (lir.tombstone()) { return TransactionKVHandler.mutateIn(core, collection, doc.id(), kvDurableTimeout(), false, false, false, true, false, doc.cas(), doc.userFlags(), @@ -266,7 +267,7 @@ private Mono doPerDoc(CoreTransactionLogger perEntryLog, List docs, SpanWrapper pspan, boolean requireCrc32ToMatchStaging, - TriFunction> perDoc) { + TriFunction> perDoc) { return Flux.fromIterable(docs) .publishOn(core.context().environment().transactionsSchedulers().schedulerCleanup()) .concatMap(docRecord -> { @@ -292,21 +293,21 @@ private Mono doPerDocGotDoc(CoreTransactionLogger perEntryLog, String attemptId, SpanWrapper pspan, boolean requireCrc32ToMatchStaging, - TriFunction> perDoc, + TriFunction> perDoc, DocRecord docRecord, CollectionIdentifier collection, MeteringUnits.MeteringUnitsBuilder units) { - return DocumentGetter.justGetDoc(core, collection, docRecord.id(), kvNonMutatingTimeout(), pspan, true, perEntryLog, units) + return DocumentGetter.justGetDoc(core, collection, docRecord.id(), kvNonMutatingTimeout(), pspan, true, perEntryLog, units, false) .flatMap(docOpt -> { if (docOpt.isPresent()) { CoreTransactionGetResult doc = docOpt.get().getT1(); - SubdocGetResponse lir = docOpt.get().getT2(); + CoreSubdocGetResult lir = docOpt.get().getT2(); MeteringUnits built = units.build(); perEntryLog.debug(attemptId, "handling doc {} with cas {} " + "and links {}, isTombstone={}{}", - DebugUtil.docId(doc), doc.cas(), doc.links(), lir.isDeleted(), DebugUtil.dbg(built)); + DebugUtil.docId(doc), doc.cas(), doc.links(), lir.tombstone(), DebugUtil.dbg(built)); if (!doc.links().isDocumentInTransaction()) { // The txn probably committed this doc then crashed. This is fine, can skip. @@ -367,7 +368,7 @@ private Mono doPerDocGotDoc(CoreTransactionLogger perEntryLog, } private RequestTracer tracer() { - return core.context().environment().requestTracer(); + return core.context().coreResources().requestTracer(); } diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/components/ActiveTransactionRecord.java b/core-io/src/main/java/com/couchbase/client/core/transaction/components/ActiveTransactionRecord.java index 998140904..ebd72ea73 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/components/ActiveTransactionRecord.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/components/ActiveTransactionRecord.java @@ -97,6 +97,7 @@ public static Mono> findEntryForTransacti return TransactionKVHandler.lookupIn(core, atrCollection, atrId, kvTimeoutNonMutating(core), false, createClientContext("ATR::findEntryForTransaction"), pspan, + false, Arrays.asList( new SubdocGetRequest.Command(SubdocCommandType.GET, ATR_FIELD_ATTEMPTS + "." 
+ attemptId, true, 0), new SubdocGetRequest.Command(SubdocCommandType.GET, "$vbucket.HLC", true, 1) @@ -104,15 +105,15 @@ false, createClientContext("ATR::findEntryForTransaction"), pspan, .map(d -> { if (units != null) { - units.add(d.flexibleExtras()); + units.add(d.meta()); } - if (!d.values()[0].status().success()) { + if (!d.field(0).status().success()) { return Optional.empty(); } else { try { - JsonNode atr = MAPPER.readValue(d.values()[0].value(), JsonNode.class); - JsonNode hlc = MAPPER.readValue(d.values()[1].value(), JsonNode.class); + JsonNode atr = MAPPER.readValue(d.field(0).value(), JsonNode.class); + JsonNode hlc = MAPPER.readValue(d.field(1).value(), JsonNode.class); ParsedHLC parsedHLC = new ParsedHLC(hlc); ActiveTransactionRecordEntry entry = createFrom(atrCollection.bucket(), @@ -129,10 +130,10 @@ false, createClientContext("ATR::findEntryForTransaction"), pspan, atrCollection.bucket(), atrCollection.scope(), atrCollection.collection(), atrId, attemptId, DebugUtil.dbg(err)); logger.warn("Attempt to dump raw JSON of ATR entry:"); try { - byte[] raw = d.values()[0].value(); + byte[] raw = d.field(0).value(); String asStr = new String(raw, StandardCharsets.UTF_8); logger.info("", "Raw JSON: {}", asStr); - byte[] rawHLC = d.values()[1].value(); + byte[] rawHLC = d.field(1).value(); String asStrHLC = new String(rawHLC, StandardCharsets.UTF_8); logger.info("", "Raw JSON HLC: {}", asStrHLC); } @@ -228,6 +229,7 @@ public static Mono> getAtr(Core core, Duration timeout, @Nullable SpanWrapper pspan) { return TransactionKVHandler.lookupIn(core, atrCollection, atrId, timeout, false, createClientContext("ATR::getAtr"), pspan, + false, Arrays.asList( new SubdocGetRequest.Command(SubdocCommandType.GET, ATR_FIELD_ATTEMPTS, true, 0), new SubdocGetRequest.Command(SubdocCommandType.GET, "$vbucket.HLC", true, 1) @@ -244,8 +246,8 @@ public static Mono> getAtr(Core core, // So this code should always be safe. 
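A pattern worth calling out across these transaction hunks: consumers of subdoc lookups move from the message-level SubdocGetResponse to the API-level CoreSubdocGetResult, so values()[i] becomes field(i), isDeleted() becomes tombstone(), and metering units are read from meta() (a CoreKvResponseMetadata) instead of flexibleExtras(). Field indices still follow the order of the SubdocGetRequest.Command list that was sent. A small fragment of the consumer-side shape, modelled on the ATR status parsing above; it assumes imports of CoreSubdocGetResult, Mapper and java.io.IOException and is not a complete class.

// After the migration: read one lookupIn field and fall back on parse failure,
// the way atrCommitAmbiguityResolutionLocked now does.
static String readStatusField(CoreSubdocGetResult result) {
  try {
    // field(0) replaces values()[0]; the index matches the first Command in the request.
    return Mapper.reader().readValue(result.field(0).value(), String.class);
  } catch (IOException e) {
    return "UNKNOWN";
  }
}
// result.tombstone() replaces isDeleted(), and units.add(result.meta()) replaces
// units.add(result.flexibleExtras()) wherever metering is tracked.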
.map(d -> { try { - JsonNode attempts = MAPPER.readValue(d.values()[0].value(), JsonNode.class); - JsonNode hlc = MAPPER.readValue(d.values()[1].value(), JsonNode.class); + JsonNode attempts = MAPPER.readValue(d.field(0).value(), JsonNode.class); + JsonNode hlc = MAPPER.readValue(d.field(1).value(), JsonNode.class); ParsedHLC parsedHLC = new ParsedHLC(hlc); return Optional.of(mapToAtr(atrCollection, atrId, attempts, parsedHLC.nowInNanos(), parsedHLC.mode())); diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/components/DocumentGetter.java b/core-io/src/main/java/com/couchbase/client/core/transaction/components/DocumentGetter.java index c05d084e5..8c44e6020 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/components/DocumentGetter.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/components/DocumentGetter.java @@ -18,6 +18,7 @@ import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.io.CollectionIdentifier; import com.couchbase.client.core.transaction.error.internal.ErrorClass; import com.couchbase.client.core.transaction.util.MeteringUnits; @@ -67,17 +68,18 @@ public static Mono> getAsync(Core core, @Nullable SpanWrapper span, Optional resolvingMissingATREntry, MeteringUnits.MeteringUnitsBuilder units, - CoreTransactionsSupportedExtensions supported) { - return justGetDoc(core, collection, docId, kvTimeoutNonMutating(core), span, true, LOGGER, units) + CoreTransactionsSupportedExtensions supported, + boolean preferredReplicaMode) { + return justGetDoc(core, collection, docId, kvTimeoutNonMutating(core), span, true, LOGGER, units, preferredReplicaMode) .flatMap(origTrans -> { if (justReturn) { return Mono.just(origTrans.map(v -> v.getT1())); } else if (origTrans.isPresent()) { CoreTransactionGetResult r = origTrans.get().getT1(); - SubdocGetResponse lir = origTrans.get().getT2(); + CoreSubdocGetResult lir = origTrans.get().getT2(); if (!r.links().isDocumentInTransaction()) { - if (lir.isDeleted()) { + if (lir.tombstone()) { return Mono.just(Optional.empty()); } else { @@ -127,7 +129,7 @@ else if (resolvingMissingATREntry.equals(r.links().stagedAttemptId())) { }); } - public static Mono>> + public static Mono>> justGetDoc(Core core, CollectionIdentifier collection, String docId, @@ -135,9 +137,11 @@ else if (resolvingMissingATREntry.equals(r.links().stagedAttemptId())) { @Nullable SpanWrapper span, boolean accessDeleted, CoreTransactionLogger logger, - MeteringUnits.MeteringUnitsBuilder units) { + MeteringUnits.MeteringUnitsBuilder units, + boolean preferredReplicaMode) { return TransactionKVHandler.lookupIn(core, collection, docId, timeout, accessDeleted, createClientContext("DocumentGetter::justGetDoc"), span, + preferredReplicaMode, Arrays.asList( // The design doc details why these specs are fetched (rather than all of "txn") new SubdocGetRequest.Command(SubdocCommandType.GET, "txn.id", true, 0), @@ -154,7 +158,7 @@ else if (resolvingMissingATREntry.equals(r.links().stagedAttemptId())) { )) .map(fragment -> { - units.add(fragment.flexibleExtras()); + units.add(fragment.meta()); try { return Optional.of(Tuples.of(CoreTransactionGetResult.createFrom(collection, docId, @@ -183,10 +187,10 @@ else if (resolvingMissingATREntry.equals(r.links().stagedAttemptId())) { }); } - private static void dumpRawLookupInField(CoreTransactionLogger logger, SubdocGetResponse fragment, int index) { + private 
static void dumpRawLookupInField(CoreTransactionLogger logger, CoreSubdocGetResult fragment, int index) { try { - if (fragment.values()[index].status().success()) { - byte[] raw = fragment.values()[index].value(); + if (fragment.field(index).status().success()) { + byte[] raw = fragment.field(index).value(); String asStr = new String(raw, StandardCharsets.UTF_8); logger.info("", "Field {}: {}", index, asStr); } diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsContext.java b/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsContext.java index 4172037cd..650c8fc28 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsContext.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsContext.java @@ -15,6 +15,7 @@ */ package com.couchbase.client.core.transaction.context; +import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.cnc.Meter; @@ -27,8 +28,8 @@ public class CoreTransactionsContext { private final CoreTransactionsCounters counters; - public CoreTransactionsContext(Meter meter) { - this.counters = new CoreTransactionsCounters(meter); + public CoreTransactionsContext(Core core, Meter meter) { + this.counters = new CoreTransactionsCounters(core, meter); } public CoreTransactionsCounters counters() { diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsCounters.java b/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsCounters.java index 985f94ca3..19058aa28 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsCounters.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/context/CoreTransactionsCounters.java @@ -15,15 +15,21 @@ */ package com.couchbase.client.core.transaction.context; +import com.couchbase.client.core.Core; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.cnc.Counter; import com.couchbase.client.core.cnc.Meter; import com.couchbase.client.core.cnc.TracingIdentifiers; -import com.couchbase.client.core.cnc.ValueRecorder; +import com.couchbase.client.core.config.ClusterConfig; +import com.couchbase.client.core.topology.ClusterIdentifier; +import com.couchbase.client.core.topology.ClusterIdentifierUtil; import com.couchbase.client.core.util.CbCollections; +import reactor.util.annotation.Nullable; import java.util.HashMap; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; import static com.couchbase.client.core.cnc.TracingIdentifiers.METER_TRANSACTION_ATTEMPTS; import static com.couchbase.client.core.cnc.TracingIdentifiers.METER_TRANSACTION_TOTAL; @@ -31,21 +37,63 @@ @Stability.Internal public class CoreTransactionsCounters { - private final Counter transactions; - private final Counter attempts; - // A histogram of transaction durations - - public CoreTransactionsCounters(Meter meter) { - Map tags = CbCollections.mapOf(TracingIdentifiers.ATTR_SERVICE, SERVICE_TRANSACTIONS); - transactions = meter.counter(METER_TRANSACTION_TOTAL, tags); - attempts = meter.counter(METER_TRANSACTION_ATTEMPTS, tags); - } - - public Counter attempts() { - return attempts; - } - - public Counter transactions() { - return transactions; - } + @Stability.Internal + public static class TransactionMetricIdentifier { + + private final @Nullable String 
clusterName; + private final @Nullable String clusterUuid; + + TransactionMetricIdentifier(@Nullable ClusterIdentifier clusterIdent) { + clusterName = clusterIdent == null ? null : clusterIdent.clusterName(); + clusterUuid = clusterIdent == null ? null : clusterIdent.clusterUuid(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TransactionMetricIdentifier that = (TransactionMetricIdentifier) o; + return Objects.equals(clusterName, that.clusterName) + && Objects.equals(clusterUuid, that.clusterUuid); + } + + @Override + public int hashCode() { + return Objects.hash(clusterName, clusterUuid); + } + } + + private final Map transactionsMetrics = new ConcurrentHashMap<>(); + private final Map attemptMetrics = new ConcurrentHashMap<>(); + private final Core core; + private final Meter meter; + + public CoreTransactionsCounters(Core core, Meter meter) { + this.core = core; + this.meter = meter; + } + + public Counter attempts() { + return genericCounter(METER_TRANSACTION_ATTEMPTS, attemptMetrics); + } + + public Counter transactions() { + return genericCounter(METER_TRANSACTION_TOTAL, transactionsMetrics); + } + + private Counter genericCounter(String name, Map metricsMap) { + ClusterConfig config = core.configurationProvider().config(); + ClusterIdentifier clusterIdent = ClusterIdentifierUtil.fromConfig(config); + return metricsMap.computeIfAbsent(new TransactionMetricIdentifier(clusterIdent), id -> { + HashMap tags = new HashMap<>(); + tags.put(TracingIdentifiers.ATTR_SYSTEM, TracingIdentifiers.ATTR_SYSTEM_COUCHBASE); + if (id.clusterName != null) { + tags.put(TracingIdentifiers.ATTR_CLUSTER_NAME, id.clusterName); + } + if (id.clusterUuid != null) { + tags.put(TracingIdentifiers.ATTR_CLUSTER_UUID, id.clusterUuid); + } + return meter.counter(name, tags); + }); + } } diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/forwards/CoreTransactionsExtension.java b/core-io/src/main/java/com/couchbase/client/core/transaction/forwards/CoreTransactionsExtension.java index 18e7844b2..daec50408 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/forwards/CoreTransactionsExtension.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/forwards/CoreTransactionsExtension.java @@ -53,6 +53,7 @@ public enum CoreTransactionsExtension { EXT_QUERY_CONTEXT("QC"), EXT_BINARY_SUPPORT("BS"), EXT_PARALLEL_UNSTAGING("PU"), + EXT_REPLICA_FROM_PREFERRED_GROUP("RP"), ; private final String value; diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/util/MeteringUnits.java b/core-io/src/main/java/com/couchbase/client/core/transaction/util/MeteringUnits.java index bd215c96c..a2b4ea4dc 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/util/MeteringUnits.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/util/MeteringUnits.java @@ -16,6 +16,7 @@ package com.couchbase.client.core.transaction.util; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreKvResponseMetadata; import com.couchbase.client.core.error.CouchbaseException; import com.couchbase.client.core.error.context.KeyValueErrorContext; import com.couchbase.client.core.io.netty.kv.MemcacheProtocol; @@ -81,6 +82,15 @@ public void add(@Nullable MemcacheProtocol.FlexibleExtras flexibleExtras) { } } + public void add(CoreKvResponseMetadata meta) { + if (meta.readUnits() != null) { + readUnits += 
meta.readUnits(); + } + if (meta.writeUnits() != null) { + writeUnits += meta.writeUnits(); + } + } + public void add(Throwable err) { add(from(err)); } diff --git a/core-io/src/main/java/com/couchbase/client/core/transaction/util/TransactionKVHandler.java b/core-io/src/main/java/com/couchbase/client/core/transaction/util/TransactionKVHandler.java index 883f4ef7e..1be6c8e2d 100644 --- a/core-io/src/main/java/com/couchbase/client/core/transaction/util/TransactionKVHandler.java +++ b/core-io/src/main/java/com/couchbase/client/core/transaction/util/TransactionKVHandler.java @@ -16,10 +16,16 @@ package com.couchbase.client.core.transaction.util; import com.couchbase.client.core.Core; +import com.couchbase.client.core.CoreKeyspace; +import com.couchbase.client.core.Reactor; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreReadPreference; +import com.couchbase.client.core.api.kv.CoreSubdocGetResult; import com.couchbase.client.core.cnc.TracingIdentifiers; import com.couchbase.client.core.config.BucketConfig; import com.couchbase.client.core.error.DocumentNotFoundException; +import com.couchbase.client.core.error.DocumentUnretrievableException; +import com.couchbase.client.core.error.context.ReducedKeyValueErrorContext; import com.couchbase.client.core.io.CollectionIdentifier; import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.DurabilityLevel; @@ -32,6 +38,7 @@ import com.couchbase.client.core.msg.kv.SubdocMutateRequest; import com.couchbase.client.core.msg.kv.SubdocMutateResponse; import com.couchbase.client.core.retry.BestEffortRetryStrategy; +import com.couchbase.client.core.service.kv.ReplicaHelper; import com.couchbase.client.core.transaction.support.SpanWrapper; import com.couchbase.client.core.transaction.log.CoreTransactionLogger; import com.couchbase.client.core.transaction.support.SpanWrapperUtil; @@ -46,6 +53,7 @@ import java.util.concurrent.CompletableFuture; import static com.couchbase.client.core.error.DefaultErrorUtil.keyValueStatusToException; +import static com.couchbase.client.core.msg.kv.SubdocGetRequest.convertCommandsToCore; /** * Transactions does a lot of KV work from core-io. 
This logic is essentially a mini version of java-client, providing @@ -67,7 +75,7 @@ public static Mono insert(final Core core, final SpanWrapper pspan) { return Mono.defer(() -> { long start = System.nanoTime(); - SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().environment().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_INSERT, pspan); + SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().coreResources().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_INSERT, pspan); InsertRequest request = new InsertRequest(id, transcodedContent, @@ -105,7 +113,7 @@ public static Mono remove(final Core core, final SpanWrapper pspan) { return Mono.defer(() -> { long start = System.nanoTime(); - SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().environment().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_REMOVE, pspan); + SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().coreResources().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_REMOVE, pspan); RemoveRequest request = new RemoveRequest(id, cas, @@ -131,17 +139,29 @@ public static Mono remove(final Core core, }); } - public static Mono lookupIn(final Core core, + public static Mono lookupIn(final Core core, CollectionIdentifier collectionIdentifier, final String id, final Duration timeout, boolean accessDeleted, final Map clientContext, @Nullable final SpanWrapper pspan, + boolean preferredReplicaMode, final List commands) { return Mono.defer(() -> { long start = System.nanoTime(); - SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().environment().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_LOOKUP_IN, pspan); + SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().coreResources().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_LOOKUP_IN, pspan); + + + if (preferredReplicaMode) { + CompletableFuture replicas = ReplicaHelper.lookupInAnyReplicaAsync(core, collectionIdentifier, id, convertCommandsToCore(commands), timeout, BestEffortRetryStrategy.INSTANCE, + clientContext, pspan == null ? 
null : pspan.span(), CoreReadPreference.PREFERRED_SERVER_GROUP, (r) -> r); + + return Reactor.wrap(replicas, () -> {}) + .switchIfEmpty(Mono.error(new DocumentUnretrievableException(ReducedKeyValueErrorContext.create(id, collectionIdentifier)))) + .doOnError(span::recordException) + .doOnTerminate(span::finish); + } byte flags = 0; if (accessDeleted) { @@ -166,7 +186,7 @@ public static Mono lookupIn(final Core core, .response() .thenApply(response -> { if (response.status().success() || response.status() == ResponseStatus.SUBDOC_FAILURE) { - return response; + return response.toCore(CoreKeyspace.from(collectionIdentifier), id); } throw keyValueStatusToException(request, response); }) @@ -230,7 +250,7 @@ public static Mono mutateIn(final Core core, final List commands, CoreTransactionLogger logger) { return Mono.defer(() -> { - SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().environment().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_MUTATE_IN, pspan); + SpanWrapper span = SpanWrapperUtil.createOp(null, core.context().coreResources().requestTracer(), collectionIdentifier, id, TracingIdentifiers.SPAN_REQUEST_KV_MUTATE_IN, pspan); long start = System.nanoTime(); final boolean requiresBucketConfig = createAsDeleted || reviveDocument; diff --git a/core-io/src/main/java/com/couchbase/client/core/util/ClusterCapabilitiesUtil.java b/core-io/src/main/java/com/couchbase/client/core/util/ClusterCapabilitiesUtil.java deleted file mode 100644 index 0a69e04d0..000000000 --- a/core-io/src/main/java/com/couchbase/client/core/util/ClusterCapabilitiesUtil.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2024 Couchbase, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.couchbase.client.core.util; - -import com.couchbase.client.core.Core; -import com.couchbase.client.core.annotation.Stability; -import com.couchbase.client.core.config.BucketConfig; -import com.couchbase.client.core.config.ClusterCapabilities; -import com.couchbase.client.core.config.GlobalConfig; -import com.couchbase.client.core.error.UnambiguousTimeoutException; -import com.couchbase.client.core.retry.reactor.Backoff; -import com.couchbase.client.core.retry.reactor.Retry; -import com.couchbase.client.core.retry.reactor.RetryExhaustedException; -import com.couchbase.client.core.service.ServiceType; -import reactor.core.publisher.Mono; - -import java.time.Duration; -import java.util.Map; -import java.util.Set; - -/** - * Defines helpful routines for working with cluster capabilities. - */ -@Stability.Internal -public class ClusterCapabilitiesUtil { - private ClusterCapabilitiesUtil() { - } - - private static final Duration retryDelay = Duration.ofMillis(100); - - public static Mono>> waitForClusterCapabilities(final Core core, - final Duration timeout) { - return Mono.fromCallable(() -> { - // Cluster capabilities should be the same across global and bucket configs, so just use whatever is available. 
- GlobalConfig globalConfig = core.clusterConfig().globalConfig(); - if (globalConfig != null) { - return globalConfig.clusterCapabilities(); - } - Map bucketConfigs = core.clusterConfig().bucketConfigs(); - if (bucketConfigs != null && !bucketConfigs.isEmpty()) { - return bucketConfigs.values().iterator().next().clusterCapabilities(); - } - throw new NullPointerException(); - }).retryWhen(Retry.anyOf(NullPointerException.class) - .timeout(timeout) - .backoff(Backoff.fixed(retryDelay)) - .toReactorRetry()) - .onErrorResume(err -> { - if (err instanceof RetryExhaustedException) { - return Mono.error(new UnambiguousTimeoutException("Timed out while waiting for global config", null)); - } else { - return Mono.error(err); - } - }); - } -} diff --git a/core-io/src/main/java/com/couchbase/client/core/util/ConsistencyUtil.java b/core-io/src/main/java/com/couchbase/client/core/util/ConsistencyUtil.java index b0304b45e..2903d1600 100644 --- a/core-io/src/main/java/com/couchbase/client/core/util/ConsistencyUtil.java +++ b/core-io/src/main/java/com/couchbase/client/core/util/ConsistencyUtil.java @@ -25,13 +25,12 @@ import com.couchbase.client.core.endpoint.http.CoreHttpRequest; import com.couchbase.client.core.endpoint.http.CoreHttpResponse; import com.couchbase.client.core.error.HttpStatusCodeException; -import com.couchbase.client.core.error.IndexNotFoundException; import com.couchbase.client.core.error.RequestCanceledException; import com.couchbase.client.core.error.ViewServiceException; import com.couchbase.client.core.msg.CancellationReason; import com.couchbase.client.core.msg.RequestTarget; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.transaction.util.TriFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -274,7 +273,7 @@ private static Set getConfig(Core core) { if (core.clusterConfig().globalConfig() != null) { List nodes = core.clusterConfig().globalConfig().portInfos() .stream() - .map(PortInfo::identifier) + .map(PortInfo::id) .collect(Collectors.toList()); logger.info("Adding nodes from global config: {}", nodes); @@ -286,7 +285,7 @@ private static Set getConfig(Core core) { if (core.clusterConfig().bucketConfigs() != null) { List nodes = core.clusterConfig().bucketConfigs().entrySet() .stream() - .flatMap(v -> v.getValue().nodes().stream().map(x -> x.identifier())) + .flatMap(v -> v.getValue().nodes().stream().map(x -> x.id())) .collect(Collectors.toList()); logger.info("Adding nodes from bucket configs: {}", nodes); @@ -340,7 +339,7 @@ private static void waitUntilAllNodesMatchPredicate(Core core, while (!done) { - String debug = String.format("%s:%d waiting for %s", node.address(), node.managerPort(), predicateDesc); + String debug = node + " waiting for " + predicateDesc; CoreHttpRequest request = createRequest.apply(node); diff --git a/core-io/src/main/java/com/couchbase/client/core/util/NativeImageHelper.java b/core-io/src/main/java/com/couchbase/client/core/util/NativeImageHelper.java new file mode 100644 index 000000000..8d3f0240c --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/util/NativeImageHelper.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.core.util; + +// CHECKSTYLE:OFF IllegalImport - Allow usage of jctools classes + +import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.annotation.UsedBy; +import com.couchbase.client.core.deps.org.jctools.queues.MpscArrayQueue; +import java.util.Queue; + +import static com.couchbase.client.core.annotation.UsedBy.Project.QUARKUS_COUCHBASE; + +@Stability.Internal +public class NativeImageHelper { + + private NativeImageHelper() { + } + + /** + * This static factory method has been added in order to help the substitutions + * for native image compatibility in the Quarkus extension. + * In the extension, this is substituted for an MpscAtomicUnpaddedArrayQueue, + * a slightly less performant but memory safe variant. + * See {@link com.couchbase.client.core.deps.org.jctools.queues.MpscArrayQueue} and {@link com.couchbase.client.core.deps.org.jctools.queues.atomic.unpadded.MpscAtomicUnpaddedArrayQueue}. + * @param capacity The capacity of the queue + * @return A new MpscArrayQueue. + * @param The type held by the queue. + */ + @UsedBy(QUARKUS_COUCHBASE) + public static Queue createMpscArrayQueue(int capacity) { + return new MpscArrayQueue<>(capacity); + } +} diff --git a/core-io/src/main/java/com/couchbase/client/core/util/ReactorOps.java b/core-io/src/main/java/com/couchbase/client/core/util/ReactorOps.java new file mode 100644 index 000000000..e474224b6 --- /dev/null +++ b/core-io/src/main/java/com/couchbase/client/core/util/ReactorOps.java @@ -0,0 +1,89 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.couchbase.client.core.util; + + +import com.couchbase.client.core.Reactor; +import com.couchbase.client.core.annotation.Stability; +import reactor.core.CorePublisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.util.annotation.Nullable; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; + +@Stability.Internal +public interface ReactorOps { + + default Mono publishOnUserScheduler(Supplier> future) { + return publishOnUserScheduler(Reactor.toMono(future)); + } + + Mono publishOnUserScheduler(Mono mono); + + Flux publishOnUserScheduler(Flux mono); + + /** + * Returns a dynamic proxy for the given object + * (unless the given supplier is null, in which case + * the same object is returned). + *
+ * Any Flux or Mono instances returned by the proxied interface methods + * are published on the scheduler returned by the given supplier. + */ + @Stability.Internal + static T proxyToPublishOnSuppliedScheduler( + T obj, + Class interfaceToProxy, + @Nullable Supplier scheduler + ) { + if (scheduler == null) { + return obj; + } + return interfaceToProxy.cast( + Proxy.newProxyInstance( + interfaceToProxy.getClassLoader(), + new Class[]{interfaceToProxy}, + new InvocationHandler() { + @SuppressWarnings("ReactiveStreamsUnusedPublisher") + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + try { + Object result = method.invoke(obj, args); + if (result instanceof CorePublisher) { + if (result instanceof Mono) { + return Mono.defer(() -> ((Mono) result).publishOn(scheduler.get())); + } + if (result instanceof Flux) { + return Flux.defer(() -> ((Flux) result).publishOn(scheduler.get())); + } + } + return result; + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } + }) + ); + } +} diff --git a/core-io/src/test/java/com/couchbase/client/core/CoreContextTest.java b/core-io/src/test/java/com/couchbase/client/core/CoreContextTest.java index 546d259d8..a7043d1ad 100644 --- a/core-io/src/test/java/com/couchbase/client/core/CoreContextTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/CoreContextTest.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.env.CoreEnvironment; import org.junit.jupiter.api.Test; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; @@ -33,7 +34,7 @@ class CoreContextTest { void getAndExportProperties() { long id = 12345; CoreEnvironment env = mock(CoreEnvironment.class); - Core core = mock(Core.class); + Core core = mockCore(); Authenticator authenticator = mock(Authenticator.class); CoreContext ctx = new CoreContext(core, id, env, authenticator); diff --git a/core-io/src/test/java/com/couchbase/client/core/CoreTest.java b/core-io/src/test/java/com/couchbase/client/core/CoreTest.java index 14272acfb..5eb9fb520 100644 --- a/core-io/src/test/java/com/couchbase/client/core/CoreTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/CoreTest.java @@ -29,8 +29,8 @@ import com.couchbase.client.core.error.GlobalConfigNotFoundException; import com.couchbase.client.core.error.UnsupportedConfigMechanismException; import com.couchbase.client.core.node.Node; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.ConnectionString; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -43,10 +43,12 @@ import reactor.core.publisher.Sinks; import java.util.Arrays; -import java.util.HashMap; import java.util.Map; import java.util.Optional; +import static com.couchbase.client.core.topology.TopologyTestUtils.nodeId; +import static com.couchbase.client.core.topology.TopologyTestUtils.topologyParser; +import static com.couchbase.client.core.util.CbCollections.mapOf; import static com.couchbase.client.test.Util.readResource; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; @@ -124,9 +126,10 @@ void addNodesAndServicesOnNewConfig() throws Exception { configureMock(mock101, "mock101", "10.143.190.101", 8091); configureMock(mock102, "mock102", "10.143.190.102", 
8091); - final Map mocks = new HashMap<>(); - mocks.put("10.143.190.101", mock101); - mocks.put("10.143.190.102", mock102); + final Map mocks = mapOf( + mock101.identifier(), mock101, + mock102.identifier(), mock102 + ); try (Core core = new Core(ENV, AUTHENTICATOR, CONNECTION_STRING) { @Override public ConfigurationProvider createConfigurationProvider() { @@ -135,17 +138,15 @@ public ConfigurationProvider createConfigurationProvider() { @Override protected Node createNode(final NodeIdentifier target) { - return mocks.get(target.address()); + return mocks.get(target); } }) { logger.info("Validating"); verify(mock101, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); verify(mock102, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); - BucketConfig oneNodeConfig = BucketConfigParser.parse( - readResource("one_node_config.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig oneNodeConfig = topologyParser().parseBucketConfig( + readResource("one_node_config.json", CoreTest.class) ); mockConfigProvider.accept(oneNodeConfig); @@ -167,10 +168,8 @@ protected Node createNode(final NodeIdentifier target) { verify(mock102, never()).addService(ServiceType.QUERY, 8093, Optional.empty()); verify(mock102, never()).addService(ServiceType.KV, 11210, Optional.of("travel-sample")); - BucketConfig twoNodeConfig = BucketConfigParser.parse( - readResource("two_nodes_config.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodeConfig = topologyParser().parseBucketConfig( + readResource("two_nodes_config.json", CoreTest.class) ); mockConfigProvider.accept(twoNodeConfig); @@ -196,7 +195,7 @@ protected Node createNode(final NodeIdentifier target) { } void configureMock(Node mock, String id, String ip, int port) { - when(mock.identifier()).thenReturn(new NodeIdentifier(ip, port)); + when(mock.identifier()).thenReturn(nodeId(ip, port)); when(mock.addService(any(ServiceType.class), anyInt(), any(Optional.class))) .thenAnswer((Answer>) invocation -> { logger.info("{}.addService called with arguments: {}", id, Arrays.toString(invocation.getArguments())); @@ -222,9 +221,10 @@ void addServicesOnNewConfig() throws Exception { configureMock(mock101, "mock101", "10.143.190.101", 8091); configureMock(mock102, "mock102", "10.143.190.102", 8091); - final Map mocks = new HashMap<>(); - mocks.put("10.143.190.101", mock101); - mocks.put("10.143.190.102", mock102); + final Map mocks = mapOf( + mock101.identifier(), mock101, + mock102.identifier(), mock102 + ); try (Core core = new Core(ENV, AUTHENTICATOR, CONNECTION_STRING) { @Override public ConfigurationProvider createConfigurationProvider() { @@ -233,17 +233,15 @@ public ConfigurationProvider createConfigurationProvider() { @Override protected Node createNode(final NodeIdentifier target) { - return mocks.get(target.address()); + return mocks.get(target); } }) { logger.info("Validating"); verify(mock101, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); verify(mock102, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); - BucketConfig twoNodesConfig = BucketConfigParser.parse( - readResource("two_nodes_config.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodesConfig = topologyParser().parseBucketConfig( + readResource("two_nodes_config.json", CoreTest.class) ); mockConfigProvider.accept(twoNodesConfig); @@ -266,10 +264,8 @@ protected Node createNode(final NodeIdentifier target) { verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.KV, 11210, Optional.of("travel-sample")); - BucketConfig 
twoNodesConfigMore = BucketConfigParser.parse( - readResource("two_nodes_config_more_services.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodesConfigMore = topologyParser().parseBucketConfig( + readResource("two_nodes_config_more_services.json", CoreTest.class) ); mockConfigProvider.accept(twoNodesConfigMore); @@ -306,9 +302,10 @@ void removeNodesAndServicesOnNewConfig() throws Exception { configureMock(mock101, "mock101", "10.143.190.101", 8091); configureMock(mock102, "mock102", "10.143.190.102", 8091); - final Map mocks = new HashMap<>(); - mocks.put("10.143.190.101", mock101); - mocks.put("10.143.190.102", mock102); + final Map mocks = mapOf( + mock101.identifier(), mock101, + mock102.identifier(), mock102 + ); try (Core core = new Core(ENV, AUTHENTICATOR, CONNECTION_STRING) { @Override public ConfigurationProvider createConfigurationProvider() { @@ -318,17 +315,15 @@ public ConfigurationProvider createConfigurationProvider() { @Override protected Node createNode(final NodeIdentifier target) { logger.info("createNode {}", target); - return mocks.get(target.address()); + return mocks.get(target); } }) { logger.info("Validating"); verify(mock101, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); verify(mock102, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); - BucketConfig twoNodesConfig = BucketConfigParser.parse( - readResource("two_nodes_config_more_services.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodesConfig = topologyParser().parseBucketConfig( + readResource("two_nodes_config_more_services.json", CoreTest.class) ); mockConfigProvider.accept(twoNodesConfig); @@ -340,7 +335,7 @@ protected Node createNode(final NodeIdentifier target) { verify(mock101, timeout(TIMEOUT).times(1)) .addService(ServiceType.QUERY, 8093, Optional.empty()); verify(mock101, timeout(TIMEOUT).times(1)) - .addService(ServiceType.KV, 11210, Optional.of("travel-sample")); + .addService(ServiceType.KV, 11210, Optional.of(twoNodesConfig.name())); verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.VIEWS, 8092, Optional.empty()); @@ -349,14 +344,12 @@ protected Node createNode(final NodeIdentifier target) { verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.QUERY, 8093, Optional.empty()); verify(mock102, timeout(TIMEOUT).times(1)) - .addService(ServiceType.KV, 11210, Optional.of("travel-sample")); + .addService(ServiceType.KV, 11210, Optional.of(twoNodesConfig.name())); verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.SEARCH, 8094, Optional.empty()); - BucketConfig twoNodesLessServices = BucketConfigParser.parse( - readResource("two_nodes_config.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodesLessServices = topologyParser().parseBucketConfig( + readResource("two_nodes_config.json", CoreTest.class) ); mockConfigProvider.accept(twoNodesLessServices); @@ -375,9 +368,10 @@ void removesNodeIfNotPresentInConfigAnymore() throws Exception { configureMock(mock101, "mock101", "10.143.190.101", 8091); configureMock(mock102, "mock102", "10.143.190.102", 8091); - final Map mocks = new HashMap<>(); - mocks.put("10.143.190.101", mock101); - mocks.put("10.143.190.102", mock102); + final Map mocks = mapOf( + mock101.identifier(), mock101, + mock102.identifier(), mock102 + ); try (Core core = new Core(ENV, AUTHENTICATOR, CONNECTION_STRING) { @Override public ConfigurationProvider createConfigurationProvider() { @@ -386,17 +380,15 @@ public ConfigurationProvider createConfigurationProvider() { @Override 
protected Node createNode(final NodeIdentifier target) { - return mocks.get(target.address()); + return mocks.get(target); } }) { logger.info("Validating"); verify(mock101, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); verify(mock102, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); - BucketConfig twoNodesConfig = BucketConfigParser.parse( - readResource("two_nodes_config_more_services.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodesConfig = topologyParser().parseBucketConfig( + readResource("two_nodes_config_more_services.json", CoreTest.class) ); mockConfigProvider.accept(twoNodesConfig); @@ -408,7 +400,7 @@ protected Node createNode(final NodeIdentifier target) { verify(mock101, timeout(TIMEOUT).times(1)) .addService(ServiceType.QUERY, 8093, Optional.empty()); verify(mock101, timeout(TIMEOUT).times(1)) - .addService(ServiceType.KV, 11210, Optional.of("travel-sample")); + .addService(ServiceType.KV, 11210, Optional.of(twoNodesConfig.name())); verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.VIEWS, 8092, Optional.empty()); @@ -417,14 +409,12 @@ protected Node createNode(final NodeIdentifier target) { verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.QUERY, 8093, Optional.empty()); verify(mock102, timeout(TIMEOUT).times(1)) - .addService(ServiceType.KV, 11210, Optional.of("travel-sample")); + .addService(ServiceType.KV, 11210, Optional.of(twoNodesConfig.name())); verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.SEARCH, 8094, Optional.empty()); - BucketConfig twoNodesLessServices = BucketConfigParser.parse( - readResource("one_node_config.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig twoNodesLessServices = topologyParser().parseBucketConfig( + readResource("one_node_config.json", CoreTest.class) ); mockConfigProvider.accept(twoNodesLessServices); @@ -444,13 +434,13 @@ void addsSecondNodeIfBothSameHostname() throws Exception { Node mock101 = mock(Node.class); Node mock102 = mock(Node.class); - configureMock(mock101, "mock101", LOCALHOST, 9000); - configureMock(mock102, "mock102", LOCALHOST, 9001); - - final Map mocks = new HashMap<>(); + configureMock(mock101, "mock101", "192.168.1.194", 9000); + configureMock(mock102, "mock102", "192.168.1.194", 9001); - mocks.put("127.0.0.1:9000", mock101); - mocks.put("127.0.0.1:9001", mock102); + final Map mocks = mapOf( + mock101.identifier(), mock101, + mock102.identifier(), mock102 + ); try (Core core = new Core(ENV, AUTHENTICATOR, CONNECTION_STRING) { @Override public ConfigurationProvider createConfigurationProvider() { @@ -459,17 +449,15 @@ public ConfigurationProvider createConfigurationProvider() { @Override protected Node createNode(final NodeIdentifier target) { - return mocks.get(target.address() + ":" + target.managerPort()); + return mocks.get(target); } }) { logger.info("Validating"); verify(mock101, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); verify(mock102, timeout(TIMEOUT).times(0)).addService(any(), anyInt(), any()); - BucketConfig oneNodeConfig = BucketConfigParser.parse( - readResource("cluster_run_two_nodes.json", CoreTest.class), - ENV, - LOCALHOST + BucketConfig oneNodeConfig = topologyParser().parseBucketConfig( + readResource("config/cluster_run_two_nodes_same_host.json", CoreTest.class) ); mockConfigProvider.accept(oneNodeConfig); @@ -479,14 +467,14 @@ protected Node createNode(final NodeIdentifier target) { verify(mock101, timeout(TIMEOUT).times(1)) .addService(ServiceType.MANAGER, 9000, 
Optional.empty()); verify(mock101, timeout(TIMEOUT).times(1)) - .addService(ServiceType.KV, 12000, Optional.of("default")); + .addService(ServiceType.KV, 12000, Optional.of(oneNodeConfig.name())); verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.VIEWS, 9501, Optional.empty()); verify(mock102, timeout(TIMEOUT).times(1)) .addService(ServiceType.MANAGER, 9001, Optional.empty()); verify(mock102, timeout(TIMEOUT).times(1)) - .addService(ServiceType.KV, 12002, Optional.of("default")); + .addService(ServiceType.KV, 12002, Optional.of(oneNodeConfig.name())); } } diff --git a/core-io/src/test/java/com/couchbase/client/core/TimerTest.java b/core-io/src/test/java/com/couchbase/client/core/TimerTest.java index af76bbeab..2e090c52a 100644 --- a/core-io/src/test/java/com/couchbase/client/core/TimerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/TimerTest.java @@ -24,6 +24,7 @@ import java.time.Duration; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -38,7 +39,7 @@ class TimerTest { void performsBackpressureWhenOverLimit() throws Exception { Timer timer = Timer.createAndStart(2); try { - Core core = mock(Core.class); + Core core = mockCore(); assertEquals(0, timer.outstandingForRetry()); Request request = mock(Request.class); @@ -81,4 +82,4 @@ void assignsRegistrationToRequest() { } } -} \ No newline at end of file +} diff --git a/core-io/src/test/java/com/couchbase/client/core/config/ClusterConfigTest.java b/core-io/src/test/java/com/couchbase/client/core/config/ClusterConfigTest.java index 028ea8bc2..1762a882e 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/ClusterConfigTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/ClusterConfigTest.java @@ -34,7 +34,7 @@ class ClusterConfigTest { private static PortInfo minimalPortInfo(String host) { - return new PortInfo(mapOf("mgmt", 8091), host, emptyMap()); + return new PortInfo(mapOf("mgmt", 8091), host, emptyMap(), null); } private static NodeInfo minimalNodeInfo(String host, int port) { diff --git a/core-io/src/test/java/com/couchbase/client/core/config/DefaultConfigurationProviderTest.java b/core-io/src/test/java/com/couchbase/client/core/config/DefaultConfigurationProviderTest.java index 98e3bc7d1..9c3aeabf6 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/DefaultConfigurationProviderTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/DefaultConfigurationProviderTest.java @@ -31,8 +31,9 @@ import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.GetCollectionIdRequest; import com.couchbase.client.core.msg.kv.GetCollectionIdResponse; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.ConnectionString; +import com.couchbase.client.core.util.MockUtil; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; @@ -287,7 +288,7 @@ static Core mockCore(CoreEnvironment env) { CoreContext ctx = mock(CoreContext.class); when(ctx.environment()).thenReturn(env); - Core core = mock(Core.class); + Core core = MockUtil.mockCore(); when(core.context()).thenReturn(ctx); when(core.environment()).thenReturn(env); return core; diff --git 
a/core-io/src/test/java/com/couchbase/client/core/config/loader/BaseBucketLoaderTest.java b/core-io/src/test/java/com/couchbase/client/core/config/loader/BaseBucketLoaderTest.java index 7ad761f4a..2bf2b6668 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/loader/BaseBucketLoaderTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/loader/BaseBucketLoaderTest.java @@ -25,9 +25,9 @@ import com.couchbase.client.core.env.CoreEnvironment; import com.couchbase.client.core.error.ConfigException; import com.couchbase.client.core.error.CouchbaseException; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.node.StandardMemcachedHashingStrategy; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; @@ -35,6 +35,8 @@ import java.util.Optional; +import static com.couchbase.client.core.topology.TopologyTestUtils.nodeId; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.readResource; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -50,7 +52,7 @@ */ class BaseBucketLoaderTest { - private static final NodeIdentifier SEED = new NodeIdentifier("127.0.0.1", 8091); + private static final NodeIdentifier SEED = nodeId("127.0.0.1", 8091); private static final String BUCKET = "bucket"; private static final int PORT = 1234; private static final ServiceType SERVICE = ServiceType.KV; @@ -60,7 +62,7 @@ class BaseBucketLoaderTest { @BeforeEach void setup() { CoreEnvironment env = mock(CoreEnvironment.class); - core = mock(Core.class); + core = mockCore(); CoreContext ctx = new CoreContext(core, 1, env, mock(Authenticator.class)); when(core.context()).thenReturn(ctx); } diff --git a/core-io/src/test/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderTest.java b/core-io/src/test/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderTest.java index aae355072..16629b213 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/loader/ClusterManagerBucketLoaderTest.java @@ -26,7 +26,7 @@ import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.manager.BucketConfigRequest; import com.couchbase.client.core.msg.manager.BucketConfigResponse; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.retry.BestEffortRetryStrategy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicReference; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -61,7 +62,7 @@ void setup() { when(env.timeoutConfig()).thenReturn(TimeoutConfig.create()); when(env.retryStrategy()).thenReturn(BestEffortRetryStrategy.INSTANCE); - core = mock(Core.class); + core = mockCore(); CoreContext ctx = new CoreContext(core, 1, env, mock(Authenticator.class)); when(core.context()).thenReturn(ctx); loader = new ClusterManagerBucketLoader(core); diff 
--git a/core-io/src/test/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderTest.java b/core-io/src/test/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderTest.java index 4275d90ba..3c9b8cb6c 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/loader/KeyValueBucketLoaderTest.java @@ -27,14 +27,15 @@ import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.CarrierBucketConfigRequest; import com.couchbase.client.core.msg.kv.CarrierBucketConfigResponse; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.BestEffortRetryStrategy; +import com.couchbase.client.core.topology.NodeIdentifier; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import reactor.core.Disposable; import java.util.concurrent.atomic.AtomicReference; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -62,7 +63,7 @@ void setup() { when(env.timeoutConfig()).thenReturn(TimeoutConfig.create()); when(env.retryStrategy()).thenReturn(BestEffortRetryStrategy.INSTANCE); - core = mock(Core.class); + core = mockCore(); CoreContext ctx = new CoreContext(core, 1, env, mock(Authenticator.class)); when(core.context()).thenReturn(ctx); loader = new KeyValueBucketLoader(core); diff --git a/core-io/src/test/java/com/couchbase/client/core/config/refresher/ClusterManagerBucketRefresherTest.java b/core-io/src/test/java/com/couchbase/client/core/config/refresher/ClusterManagerBucketRefresherTest.java index c6255cebb..971a7507c 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/refresher/ClusterManagerBucketRefresherTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/refresher/ClusterManagerBucketRefresherTest.java @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -55,7 +56,7 @@ void beforeEach() { env = CoreEnvironment.builder().eventBus(eventBus).build(); CoreContext coreContext = mock(CoreContext.class); - core = mock(Core.class); + core = mockCore(); when(core.context()).thenReturn(coreContext); when(coreContext.environment()).thenReturn(env); ConfigurationProvider provider = mock(ConfigurationProvider.class); diff --git a/core-io/src/test/java/com/couchbase/client/core/config/refresher/GlobalRefresherTest.java b/core-io/src/test/java/com/couchbase/client/core/config/refresher/GlobalRefresherTest.java index 0c102adcd..e66ae072d 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/refresher/GlobalRefresherTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/refresher/GlobalRefresherTest.java @@ -22,12 +22,12 @@ import com.couchbase.client.core.config.ConfigRefreshFailure; import com.couchbase.client.core.config.ConfigurationProvider; import com.couchbase.client.core.config.GlobalConfig; -import com.couchbase.client.core.config.PortInfo; import com.couchbase.client.core.env.CoreEnvironment; import com.couchbase.client.core.msg.Request; import 
com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.CarrierGlobalConfigRequest; import com.couchbase.client.core.msg.kv.CarrierGlobalConfigResponse; +import com.couchbase.client.core.service.ServiceType; import com.couchbase.client.core.util.Bytes; import com.couchbase.client.core.util.NanoTimestamp; import org.junit.jupiter.api.AfterEach; @@ -36,11 +36,13 @@ import reactor.core.publisher.Flux; import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; +import static com.couchbase.client.core.topology.TopologyTestUtils.clusterTopology; +import static com.couchbase.client.core.topology.TopologyTestUtils.node; +import static com.couchbase.client.core.util.CbCollections.listOf; import static com.couchbase.client.core.util.CbCollections.mapOf; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -67,7 +69,7 @@ void beforeEach() { .build(); CoreContext coreContext = mock(CoreContext.class); - core = mock(Core.class); + core = mockCore(); when(core.context()).thenReturn(coreContext); when(coreContext.environment()).thenReturn(env); } @@ -84,12 +86,14 @@ void respectsPollInterval() { ClusterConfig clusterConfig = new ClusterConfig(); when(provider.config()).thenReturn(clusterConfig); when(provider.configChangeNotifications()).thenReturn(Flux.empty()); - GlobalConfig config = mock(GlobalConfig.class); + GlobalConfig config = new GlobalConfig(clusterTopology( + listOf( + node("foo", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091)), + node("bar", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091)) + )) + ); + clusterConfig.setGlobalConfig(config); - when(config.portInfos()).thenReturn(Arrays.asList( - new PortInfo(mapOf("kv", 11210, "mgmt", 8091), "foo", Collections.emptyMap()), - new PortInfo(mapOf("kv", 11210, "mgmt", 8091), "bar", Collections.emptyMap()) - )); final AtomicInteger invocationCounter = new AtomicInteger(0); @@ -132,12 +136,14 @@ void triggersEventIfAllNodesFailedToRefresh() { ClusterConfig clusterConfig = new ClusterConfig(); when(provider.config()).thenReturn(clusterConfig); when(provider.configChangeNotifications()).thenReturn(Flux.empty()); - GlobalConfig config = mock(GlobalConfig.class); + GlobalConfig config = new GlobalConfig(clusterTopology( + listOf( + node("foo", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091)), + node("bar", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091)) + )) + ); + clusterConfig.setGlobalConfig(config); - when(config.portInfos()).thenReturn(Arrays.asList( - new PortInfo(mapOf("kv", 11210, "mgmt", 8091), "foo", Collections.emptyMap()), - new PortInfo(mapOf("kv", 11210, "mgmt", 8091), "bar", Collections.emptyMap()) - )); final AtomicInteger invocationCounter = new AtomicInteger(0); diff --git a/core-io/src/test/java/com/couchbase/client/core/config/refresher/KeyValueBucketRefresherTest.java b/core-io/src/test/java/com/couchbase/client/core/config/refresher/KeyValueBucketRefresherTest.java index 2656a6a90..88b01147e 100644 --- a/core-io/src/test/java/com/couchbase/client/core/config/refresher/KeyValueBucketRefresherTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/config/refresher/KeyValueBucketRefresherTest.java @@ -22,9 +22,7 @@ import com.couchbase.client.core.config.ClusterConfig; import 
com.couchbase.client.core.config.ConfigRefreshFailure; import com.couchbase.client.core.config.ConfigurationProvider; -import com.couchbase.client.core.config.NodeInfo; import com.couchbase.client.core.env.CoreEnvironment; -import com.couchbase.client.core.env.IoConfig; import com.couchbase.client.core.msg.Request; import com.couchbase.client.core.msg.ResponseStatus; import com.couchbase.client.core.msg.kv.CarrierBucketConfigRequest; @@ -37,11 +35,12 @@ import reactor.core.publisher.Flux; import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; +import static com.couchbase.client.core.topology.TopologyTestUtils.nodeInfo; +import static com.couchbase.client.core.util.CbCollections.listOf; import static com.couchbase.client.core.util.CbCollections.mapOf; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.doAnswer; @@ -66,7 +65,7 @@ void beforeEach() { .build(); CoreContext coreContext = mock(CoreContext.class); - core = mock(Core.class); + core = mockCore(); when(core.context()).thenReturn(coreContext); when(coreContext.environment()).thenReturn(env); } @@ -87,11 +86,9 @@ void triggersEventIfAllNodesFailedToRefresh() { BucketConfig config = mock(BucketConfig.class); when(config.name()).thenReturn("bucket"); clusterConfig.setBucketConfig(config); - when(config.nodes()).thenReturn(Arrays.asList( - new NodeInfo("foo", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091), - Collections.emptyMap(), Collections.emptyMap()), - new NodeInfo("bar", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091), - Collections.emptyMap(), Collections.emptyMap()) + when(config.nodes()).thenReturn(listOf( + nodeInfo("foo", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091)), + nodeInfo("bar", mapOf(ServiceType.KV, 11210, ServiceType.MANAGER, 8091)) )); final AtomicInteger invocationCounter = new AtomicInteger(0); diff --git a/core-io/src/test/java/com/couchbase/client/core/endpoint/BaseEndpointTest.java b/core-io/src/test/java/com/couchbase/client/core/endpoint/BaseEndpointTest.java index 3d4b05945..5e1eda3fd 100644 --- a/core-io/src/test/java/com/couchbase/client/core/endpoint/BaseEndpointTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/endpoint/BaseEndpointTest.java @@ -57,6 +57,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -91,7 +92,7 @@ void beforeEach() { eventLoopGroup = new NioEventLoopGroup(1); eventBus = new SimpleEventBus(true, Collections.singletonList(EndpointStateChangedEvent.class)); environment = CoreEnvironment.builder().eventBus(eventBus).build(); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, environment, authenticator); + CoreContext coreContext = new CoreContext(mockCore(), 1, environment, authenticator); ctx = new ServiceContext(coreContext, LOCALHOST, 1234, ServiceType.KV, Optional.empty()); } diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/ErrorMapLoadingHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/ErrorMapLoadingHandlerTest.java index 87aad6089..5dd1bccc6 100644 --- 
a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/ErrorMapLoadingHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/ErrorMapLoadingHandlerTest.java @@ -49,6 +49,7 @@ import static com.couchbase.client.core.io.netty.kv.ProtocolVerifier.decodeHexDump; import static com.couchbase.client.core.io.netty.kv.ProtocolVerifier.verifyRequest; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.readResource; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -77,7 +78,7 @@ protected void beforeEach() { when(env.eventBus()).thenReturn(eventBus); when(env.timeoutConfig()).thenReturn(timeoutConfig); when(timeoutConfig.connectTimeout()).thenReturn(Duration.ofMillis(1000)); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, env, mock(Authenticator.class)); + CoreContext coreContext = new CoreContext(mockCore(), 1, env, mock(Authenticator.class)); endpointContext = new EndpointContext(coreContext, new HostAndPort("127.0.0.1", 1234), null, ServiceType.KV, Optional.empty(), Optional.empty(), Optional.empty()); } diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/FeatureNegotiatingHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/FeatureNegotiatingHandlerTest.java index 6ac63c58e..cb5cadded 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/FeatureNegotiatingHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/FeatureNegotiatingHandlerTest.java @@ -18,6 +18,7 @@ import static com.couchbase.client.core.io.netty.kv.ProtocolVerifier.decodeHexDump; import static com.couchbase.client.core.io.netty.kv.ProtocolVerifier.verifyRequest; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.readResource; import static com.couchbase.client.test.Util.waitUntilCondition; import static java.nio.charset.StandardCharsets.UTF_8; @@ -87,7 +88,7 @@ protected void beforeEach() { when(env.timeoutConfig()).thenReturn(timeoutConfig); when(env.userAgent()).thenReturn(new UserAgent("some", Optional.empty(), Optional.empty(), Optional.empty())); when(timeoutConfig.connectTimeout()).thenReturn(Duration.ofMillis(1000)); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, env, mock(Authenticator.class)); + CoreContext coreContext = new CoreContext(mockCore(), 1, env, mock(Authenticator.class)); endpointContext = new EndpointContext(coreContext, new HostAndPort("127.0.0.1", 1234), null, ServiceType.KV, Optional.empty(), Optional.empty(), Optional.empty()); } diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandlerTest.java index 623276d53..6c22df92e 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/KeyValueMessageHandlerTest.java @@ -52,6 +52,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -77,7 +78,7 @@ class KeyValueMessageHandlerTest { @BeforeAll static 
void setup() { ENV = CoreEnvironment.builder().eventBus(new SimpleEventBus(true)).build(); - Core core = mock(Core.class); + Core core = mockCore(); CoreContext coreContext = new CoreContext(core, 1, ENV, PasswordAuthenticator.create("foo", "bar")); ConfigurationProvider configurationProvider = mock(ConfigurationProvider.class); when(configurationProvider.collectionMap()).thenReturn(new CollectionMap()); @@ -282,4 +283,4 @@ void incrementsNotMyVbucketIndicator() { } } -} \ No newline at end of file +} diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/MemcacheProtocolTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/MemcacheProtocolTest.java index eeecfcf1d..10350b8c7 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/MemcacheProtocolTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/MemcacheProtocolTest.java @@ -37,6 +37,7 @@ import java.util.function.Function; import static com.couchbase.client.core.util.CbCollections.setOf; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static java.util.Collections.emptySet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -61,7 +62,7 @@ class MemcacheProtocolTest { void before() { eventBus = mock(EventBus.class); CoreEnvironment env = mock(CoreEnvironment.class); - context = new CoreContext(mock(Core.class), 1, env, mock(Authenticator.class)); + context = new CoreContext(mockCore(), 1, env, mock(Authenticator.class)); when(env.eventBus()).thenReturn(eventBus); } diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SaslListMechanismsHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SaslListMechanismsHandlerTest.java index 5da7c013f..c7160bec2 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SaslListMechanismsHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SaslListMechanismsHandlerTest.java @@ -51,6 +51,7 @@ import static com.couchbase.client.core.io.netty.kv.ProtocolVerifier.decodeHexDump; import static com.couchbase.client.core.io.netty.kv.ProtocolVerifier.verifyRequest; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.readResource; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -78,7 +79,7 @@ protected void beforeEach() { when(env.eventBus()).thenReturn(eventBus); when(env.timeoutConfig()).thenReturn(timeoutConfig); when(timeoutConfig.connectTimeout()).thenReturn(Duration.ofMillis(1000)); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, env, mock(Authenticator.class)); + CoreContext coreContext = new CoreContext(mockCore(), 1, env, mock(Authenticator.class)); endpointContext = new EndpointContext(coreContext, new HostAndPort("127.0.0.1", 1234), null, ServiceType.KV, Optional.empty(), Optional.empty(), Optional.empty()); } diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SelectBucketHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SelectBucketHandlerTest.java index 8823908bd..54dc3b6ac 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SelectBucketHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/kv/SelectBucketHandlerTest.java @@ -42,6 +42,7 @@ import java.util.Optional; import 
java.util.concurrent.TimeoutException; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -71,7 +72,7 @@ void setup() { when(env.eventBus()).thenReturn(simpleEventBus); when(env.timeoutConfig()).thenReturn(timeoutConfig); when(timeoutConfig.connectTimeout()).thenReturn(Duration.ofMillis(10)); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, env, mock(Authenticator.class)); + CoreContext coreContext = new CoreContext(mockCore(), 1, env, mock(Authenticator.class)); endpointContext = new EndpointContext(coreContext, new HostAndPort("127.0.0.1", 1234), null, ServiceType.KV, Optional.empty(), Optional.empty(), Optional.empty()); } diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/manager/ManagerMessageHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/manager/ManagerMessageHandlerTest.java index 17b81b213..14185a1ff 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/manager/ManagerMessageHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/manager/ManagerMessageHandlerTest.java @@ -55,6 +55,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.readResource; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -90,7 +91,7 @@ static void teardown() { */ @Test void returnsNewConfigsWhenChunked() throws Exception { - CoreContext ctx = new CoreContext(mock(Core.class), 1, ENV, PasswordAuthenticator.create(USER, PASS)); + CoreContext ctx = new CoreContext(mockCore(), 1, ENV, PasswordAuthenticator.create(USER, PASS)); BaseEndpoint endpoint = mock(BaseEndpoint.class); EndpointContext endpointContext = mock(EndpointContext.class); when(endpointContext.environment()).thenReturn(ENV); diff --git a/core-io/src/test/java/com/couchbase/client/core/io/netty/query/QueryMessageHandlerTest.java b/core-io/src/test/java/com/couchbase/client/core/io/netty/query/QueryMessageHandlerTest.java index 962466138..5db77f074 100644 --- a/core-io/src/test/java/com/couchbase/client/core/io/netty/query/QueryMessageHandlerTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/io/netty/query/QueryMessageHandlerTest.java @@ -47,6 +47,7 @@ import java.util.Optional; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.readResource; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -73,7 +74,7 @@ class QueryMessageHandlerTest { @BeforeAll static void setup() { ENV = CoreEnvironment.create(); - CORE_CTX = new CoreContext(mock(Core.class), 1, ENV, PasswordAuthenticator.create("user", "pass")); + CORE_CTX = new CoreContext(mockCore(), 1, ENV, PasswordAuthenticator.create("user", "pass")); ENDPOINT_CTX = new EndpointContext(CORE_CTX, new HostAndPort("127.0.0.1", 1234), NoopCircuitBreaker.INSTANCE, ServiceType.QUERY, Optional.empty(), Optional.empty(), Optional.empty()); } diff --git a/core-io/src/test/java/com/couchbase/client/core/kv/OrchestratorProxy.java b/core-io/src/test/java/com/couchbase/client/core/kv/OrchestratorProxy.java index 0f35de273..14ea3403a 
100644 --- a/core-io/src/test/java/com/couchbase/client/core/kv/OrchestratorProxy.java +++ b/core-io/src/test/java/com/couchbase/client/core/kv/OrchestratorProxy.java @@ -45,6 +45,7 @@ import java.util.Optional; import java.util.UUID; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -58,7 +59,7 @@ class OrchestratorProxy { private final CouchbaseBucketConfig bucketConfig; - public OrchestratorProxy(final CoreEnvironment environment, boolean capabilityEnabled) { + public OrchestratorProxy(final CoreEnvironment environment, boolean capabilityEnabled, Map> data) { // Set up Bucket Config ClusterConfig clusterConfig = new ClusterConfig(); bucketConfig = mock(CouchbaseBucketConfig.class); @@ -74,15 +75,17 @@ public OrchestratorProxy(final CoreEnvironment environment, boolean capabilityEn when(configurationProvider.config()).thenReturn(clusterConfig); // Set up core - core = mock(Core.class); + core = mockCore(); CoreContext coreContext = new CoreContext(core, 1, environment, null); when(core.context()).thenReturn(coreContext); when(core.configurationProvider()).thenReturn(configurationProvider); rangeScanOrchestrator = new RangeScanOrchestrator(core, collectionIdentifier); + + prepare(data); } - void prepare(final Map> data) { + private void prepare(final Map> data) { when(bucketConfig.numberOfPartitions()).thenReturn(data.size()); Map uuids = new HashMap<>(); diff --git a/core-io/src/test/java/com/couchbase/client/core/kv/RangeScanOrchestratorTest.java b/core-io/src/test/java/com/couchbase/client/core/kv/RangeScanOrchestratorTest.java index 04bda1d6b..6c4b06e4b 100644 --- a/core-io/src/test/java/com/couchbase/client/core/kv/RangeScanOrchestratorTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/kv/RangeScanOrchestratorTest.java @@ -21,21 +21,21 @@ import com.couchbase.client.core.env.CoreEnvironment; import com.couchbase.client.core.error.FeatureNotAvailableException; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; import java.nio.charset.StandardCharsets; import java.time.Instant; import java.util.Comparator; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Random; +import static com.couchbase.client.core.util.CbCollections.mapOf; import static com.couchbase.client.core.util.CbStrings.MAX_CODE_POINT_AS_STRING; import static com.couchbase.client.core.util.CbStrings.MIN_CODE_POINT_AS_STRING; +import static java.util.Collections.emptyMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -46,27 +46,24 @@ class RangeScanOrchestratorTest { private static final CoreEnvironment ENVIRONMENT = CoreEnvironment.create(); - private OrchestratorProxy orchestrator; - - @BeforeEach - void beforeEach() { - orchestrator = new OrchestratorProxy(ENVIRONMENT, true); - } - @AfterAll static void afterAll() { ENVIRONMENT.shutdown(); } + private static OrchestratorProxy newProxy(Map> data) { + return new OrchestratorProxy(ENVIRONMENT, true, data); + } + /** * Most basic test which makes sure that items are streamed as-is from the underlying partitions. 
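Note on the fixture style in the RangeScanOrchestratorTest changes above: the per-partition test data is now handed to the OrchestratorProxy constructor as a single immutable map built with CbCollections.mapOf, instead of mutating a HashMap and calling prepare() afterwards. The following is a minimal, dependency-free sketch of that style; the mapOf stand-in below is an assumption about its general behavior, not the SDK implementation.

// Hypothetical sketch of the mapOf(...) fixture style; not the SDK's CbCollections.
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MapOfSketch {

  // Builds an unmodifiable two-entry map, preserving insertion order.
  static <K, V> Map<K, V> mapOf(K k1, V v1, K k2, V v2) {
    Map<K, V> m = new LinkedHashMap<>();
    m.put(k1, v1);
    m.put(k2, v2);
    return Collections.unmodifiableMap(m);
  }

  public static void main(String[] args) {
    // Partition id -> items, built in one expression and passed to the fixture,
    // so each test owns its data and no shared state needs resetting.
    Map<Short, List<String>> data = mapOf(
        (short) 0, Arrays.asList("a", "b", "c", "d", "e"),
        (short) 1, Arrays.asList("x", "y", "z")
    );
    System.out.println(data.size()); // 2 partitions
  }
}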
*/ @Test void streamsUnsortedRangeScan() { - Map> data = new HashMap<>(); - data.put((short) 0, randomItemsSorted(5)); - data.put((short) 1, randomItemsSorted(3)); - orchestrator.prepare(data); + OrchestratorProxy orchestrator = newProxy(mapOf( + (short) 0, randomItemsSorted(5), + (short) 1, randomItemsSorted(3) + )); List result = orchestrator.runRangeScan(new TestRangeScan(), new TestScanOptions()); assertEquals(8, result.size()); @@ -77,10 +74,10 @@ void streamsUnsortedRangeScan() { */ @Test void streamsUnsortedSamplingScan() { - Map> data = new HashMap<>(); - data.put((short) 0, randomItemsSorted(3)); - data.put((short) 1, randomItemsSorted(4)); - orchestrator.prepare(data); + OrchestratorProxy orchestrator = newProxy(mapOf( + (short) 0, randomItemsSorted(3), + (short) 1, randomItemsSorted(4) + )); List result = orchestrator.runSamplingScan(new TestSamplingScan(10), new TestScanOptions()); assertEquals(7, result.size()); @@ -91,10 +88,10 @@ void streamsUnsortedSamplingScan() { */ @Test void samplingStopsAtLimit() { - Map> data = new HashMap<>(); - data.put((short) 0, randomItemsSorted(12)); - data.put((short) 1, randomItemsSorted(10)); - orchestrator.prepare(data); + OrchestratorProxy orchestrator = newProxy(mapOf( + (short) 0, randomItemsSorted(12), + (short) 1, randomItemsSorted(10) + )); List result = orchestrator.runSamplingScan( new TestSamplingScan(10), new TestScanOptions()); assertEquals(10, result.size()); @@ -105,7 +102,7 @@ void samplingStopsAtLimit() { */ @Test void failIfBucketCapabilityNotAvailable() { - OrchestratorProxy orchestrator = new OrchestratorProxy(ENVIRONMENT, false); + OrchestratorProxy orchestrator = new OrchestratorProxy(ENVIRONMENT, false, emptyMap()); assertThrows(FeatureNotAvailableException.class, () -> orchestrator.runRangeScan(new TestRangeScan(), new TestScanOptions())); } diff --git a/core-io/src/test/java/com/couchbase/client/core/msg/RequestContextTest.java b/core-io/src/test/java/com/couchbase/client/core/msg/RequestContextTest.java index 02a4fe7e2..93dead274 100644 --- a/core-io/src/test/java/com/couchbase/client/core/msg/RequestContextTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/msg/RequestContextTest.java @@ -25,6 +25,7 @@ import java.util.HashMap; import java.util.Map; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -40,7 +41,7 @@ class RequestContextTest { @Test void requestCancellation() { Request request = mock(Request.class); - Core core = mock(Core.class); + Core core = mockCore(); RequestContext ctx = new RequestContext(new CoreContext(core, 1, null, mock(Authenticator.class)), request); ctx.cancel(); @@ -50,7 +51,7 @@ void requestCancellation() { @Test void customPayloadCanBeAttached() { Request request = mock(Request.class); - Core core = mock(Core.class); + Core core = mockCore(); RequestContext ctx = new RequestContext(new CoreContext(core, 1, null, mock(Authenticator.class)), request); assertNull(ctx.clientContext()); @@ -61,4 +62,4 @@ void customPayloadCanBeAttached() { assertEquals(payload, ctx.clientContext()); } -} \ No newline at end of file +} diff --git a/core-io/src/test/java/com/couchbase/client/core/node/KeyValueLocatorTest.java b/core-io/src/test/java/com/couchbase/client/core/node/KeyValueLocatorTest.java index 4d806e9b0..9656e0373 100644 --- a/core-io/src/test/java/com/couchbase/client/core/node/KeyValueLocatorTest.java +++ 
b/core-io/src/test/java/com/couchbase/client/core/node/KeyValueLocatorTest.java @@ -26,7 +26,6 @@ import com.couchbase.client.core.msg.CancellationReason; import com.couchbase.client.core.msg.Request; import com.couchbase.client.core.msg.RequestContext; -import com.couchbase.client.core.msg.TargetedRequest; import com.couchbase.client.core.msg.kv.CarrierBucketConfigRequest; import com.couchbase.client.core.msg.kv.GetRequest; import org.junit.jupiter.api.Test; @@ -36,7 +35,11 @@ import java.util.Collections; import java.util.List; +import static com.couchbase.client.core.util.MockUtil.mockCore; +import static com.couchbase.client.core.topology.TopologyTestUtils.nodeId; +import static com.couchbase.client.core.topology.TopologyTestUtils.nodeInfo; import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyMap; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -53,22 +56,20 @@ class KeyValueLocatorTest { void locateGetRequestForCouchbaseBucket() { Locator locator = new KeyValueLocator(); - NodeInfo nodeInfo1 = new NodeInfo("http://foo:1234", "192.168.56.101:8091", - Collections.EMPTY_MAP, null); - NodeInfo nodeInfo2 = new NodeInfo("http://foo:1234", "192.168.56.102:8091", - Collections.EMPTY_MAP, null); + NodeInfo nodeInfo1 = nodeInfo("192.168.56.101", emptyMap()); + NodeInfo nodeInfo2 = nodeInfo("192.168.56.102", emptyMap()); GetRequest getRequestMock = mock(GetRequest.class); ClusterConfig configMock = mock(ClusterConfig.class); Node node1Mock = mock(Node.class); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeInfo1.id()); Node node2Mock = mock(Node.class); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + when(node2Mock.identifier()).thenReturn(nodeInfo2.id()); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock)); CouchbaseBucketConfig bucketMock = mock(CouchbaseBucketConfig.class); when(getRequestMock.bucket()).thenReturn("bucket"); when(getRequestMock.key()).thenReturn("key".getBytes(UTF_8)); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, mock(CoreEnvironment.class), mock(Authenticator.class)); + CoreContext coreContext = new CoreContext(mockCore(), 1, mock(CoreEnvironment.class), mock(Authenticator.class)); when(getRequestMock.context()).thenReturn(new RequestContext(coreContext, getRequestMock)); when(configMock.bucketConfig("bucket")).thenReturn(bucketMock); when(bucketMock.nodes()).thenReturn(Arrays.asList(nodeInfo1, nodeInfo2)); @@ -86,16 +87,13 @@ void locateGetRequestForCouchbaseBucket() { void pickFastForwardIfAvailableAndNmvbSeen() { Locator locator = new KeyValueLocator(); - // Setup 2 nodes - NodeInfo nodeInfo1 = new NodeInfo("http://foo:1234", "192.168.56.101:8091", - Collections.EMPTY_MAP, null); - NodeInfo nodeInfo2 = new NodeInfo("http://foo:1234", "192.168.56.102:8091", - Collections.EMPTY_MAP, null); + NodeInfo nodeInfo1 = nodeInfo("192.168.56.101", emptyMap()); + NodeInfo nodeInfo2 = nodeInfo("192.168.56.102", emptyMap()); Node node1Mock = mock(Node.class); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeInfo1.id()); Node node2Mock 
= mock(Node.class); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + when(node2Mock.identifier()).thenReturn(nodeInfo2.id()); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock)); // Configure Cluster and Bucket config @@ -144,15 +142,13 @@ void pickFastForwardIfAvailableAndNmvbSeen() { void pickCurrentIfNoFFMapAndRetry() { Locator locator = new KeyValueLocator(); - // Setup 2 nodes - NodeInfo nodeInfo1 = new NodeInfo("http://foo:1234", "192.168.56.101:8091", - Collections.EMPTY_MAP, null); - NodeInfo nodeInfo2 = new NodeInfo("http://foo:1234", "192.168.56.102:8091", - Collections.EMPTY_MAP, null); + NodeInfo nodeInfo1 = nodeInfo("192.168.56.101", emptyMap()); + NodeInfo nodeInfo2 = nodeInfo("192.168.56.102", emptyMap()); + Node node1Mock = mock(Node.class); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeInfo1.id()); Node node2Mock = mock(Node.class); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + when(node2Mock.identifier()).thenReturn(nodeInfo2.id()); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock)); // Configure Cluster and Bucket config @@ -200,15 +196,13 @@ void pickCurrentIfNoFFMapAndRetry() { void pickCurrentIfNoFFMapAndNmvbSeen() { Locator locator = new KeyValueLocator(); - // Setup 2 nodes - NodeInfo nodeInfo1 = new NodeInfo("http://foo:1234", "192.168.56.101:8091", - Collections.EMPTY_MAP, null); - NodeInfo nodeInfo2 = new NodeInfo("http://foo:1234", "192.168.56.102:8091", - Collections.EMPTY_MAP, null); + NodeInfo nodeInfo1 = nodeInfo("192.168.56.101", emptyMap()); + NodeInfo nodeInfo2 = nodeInfo("192.168.56.102", emptyMap()); + Node node1Mock = mock(Node.class); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeInfo1.id()); Node node2Mock = mock(Node.class); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + when(node2Mock.identifier()).thenReturn(nodeInfo2.id()); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock)); // Configure Cluster and Bucket config @@ -248,7 +242,7 @@ void cancelsTargetedRequestIfNodeListEmpty() { Locator locator = new KeyValueLocator(); Request request = mock(CarrierBucketConfigRequest.class); - when(request.target()).thenReturn(new NodeIdentifier("localhost", 8091)); + when(request.target()).thenReturn(nodeId("localhost", 8091)); locator.dispatch(request, Collections.emptyList(), null, null); @@ -260,11 +254,11 @@ void cancelsTargetedRequestIfNodeNotInList() { Locator locator = new KeyValueLocator(); Request request = mock(CarrierBucketConfigRequest.class); - when(request.target()).thenReturn(new NodeIdentifier("hostb", 8091)); + when(request.target()).thenReturn(nodeId("hostb", 8091)); Node node = mock(Node.class); when(node.state()).thenReturn(NodeState.CONNECTED); - when(node.identifier()).thenReturn(new NodeIdentifier("hosta", 8091)); + when(node.identifier()).thenReturn(nodeId("hosta", 8091)); locator.dispatch(request, Collections.singletonList(node), null, null); diff --git a/core-io/src/test/java/com/couchbase/client/core/node/NodeTest.java b/core-io/src/test/java/com/couchbase/client/core/node/NodeTest.java index 
20cf751e3..818df5d83 100644 --- a/core-io/src/test/java/com/couchbase/client/core/node/NodeTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/node/NodeTest.java @@ -39,6 +39,7 @@ import com.couchbase.client.core.service.Service; import com.couchbase.client.core.service.ServiceState; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.core.util.HostAndPort; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -52,6 +53,7 @@ import java.util.concurrent.atomic.AtomicReference; import static com.couchbase.client.core.util.CbCollections.listOf; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -79,7 +81,7 @@ private static NodeIdentifier testNodeIdentifier() { @BeforeAll static void beforeAll() { - Core core = mock(Core.class); + Core core = mockCore(); ENV = CoreEnvironment .builder() .build(); diff --git a/core-io/src/test/java/com/couchbase/client/core/node/RoundRobinLocatorTest.java b/core-io/src/test/java/com/couchbase/client/core/node/RoundRobinLocatorTest.java index 8687d1292..07af6aaba 100644 --- a/core-io/src/test/java/com/couchbase/client/core/node/RoundRobinLocatorTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/node/RoundRobinLocatorTest.java @@ -19,10 +19,10 @@ import com.couchbase.client.core.config.ClusterConfig; import com.couchbase.client.core.msg.CancellationReason; import com.couchbase.client.core.msg.Request; -import com.couchbase.client.core.msg.TargetedRequest; import com.couchbase.client.core.msg.manager.BucketConfigRequest; import com.couchbase.client.core.msg.query.QueryRequest; import com.couchbase.client.core.service.ServiceType; +import com.couchbase.client.core.topology.NodeIdentifier; import org.junit.jupiter.api.Test; import java.util.ArrayList; @@ -30,6 +30,7 @@ import java.util.Collections; import java.util.List; +import static com.couchbase.client.core.topology.TopologyTestUtils.nodeId; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -51,10 +52,10 @@ void selectNextNode() { Node node1Mock = mock(Node.class); when(node1Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(true); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeId("192.168.56.101", 8091)); Node node2Mock = mock(Node.class); when(node2Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(true); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + when(node2Mock.identifier()).thenReturn(nodeId("192.168.56.102", 8091)); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock)); locator.dispatch(request, nodes, configMock, null); @@ -79,13 +80,13 @@ void skipNodeWithoutServiceEnabled() { when(configMock.hasClusterOrBucketConfig()).thenReturn(true); Node node1Mock = mock(Node.class); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeId("192.168.56.101", 8091)); when(node1Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(false); Node node2Mock = mock(Node.class); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + 
when(node2Mock.identifier()).thenReturn(nodeId("192.168.56.102", 8091)); when(node2Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(true); Node node3Mock = mock(Node.class); - when(node3Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.103", 8091)); + when(node3Mock.identifier()).thenReturn(nodeId("192.168.56.103", 8091)); when(node3Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(false); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock, node3Mock)); @@ -119,16 +120,16 @@ void shouldDistributeFairlyUnderMDS() { when(configMock.hasClusterOrBucketConfig()).thenReturn(true); Node node1Mock = mock(Node.class); - when(node1Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.101", 8091)); + when(node1Mock.identifier()).thenReturn(nodeId("192.168.56.101", 8091)); when(node1Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(false); Node node2Mock = mock(Node.class); - when(node2Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.102", 8091)); + when(node2Mock.identifier()).thenReturn(nodeId("192.168.56.102", 8091)); when(node2Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(false); Node node3Mock = mock(Node.class); - when(node3Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.103", 8091)); + when(node3Mock.identifier()).thenReturn(nodeId("192.168.56.103", 8091)); when(node3Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(true); Node node4Mock = mock(Node.class); - when(node4Mock.identifier()).thenReturn(new NodeIdentifier("192.168.56.104", 8091)); + when(node4Mock.identifier()).thenReturn(nodeId("192.168.56.104", 8091)); when(node4Mock.serviceEnabled(ServiceType.QUERY)).thenReturn(true); List nodes = new ArrayList<>(Arrays.asList(node1Mock, node2Mock, node3Mock, node4Mock)); @@ -162,11 +163,11 @@ void cancelsTargetedRequestIfNodeNotInList() { Locator locator = new RoundRobinLocator(ServiceType.QUERY); Request request = mock(BucketConfigRequest.class); - when(request.target()).thenReturn(new NodeIdentifier("hostb", 8091)); + when(request.target()).thenReturn(nodeId("hostb", 8091)); Node node = mock(Node.class); when(node.state()).thenReturn(NodeState.CONNECTED); - when(node.identifier()).thenReturn(new NodeIdentifier("hosta", 8091)); + when(node.identifier()).thenReturn(nodeId("hosta", 8091)); when(node.serviceEnabled(ServiceType.QUERY)).thenReturn(true); locator.dispatch(request, Collections.singletonList(node), null, null); diff --git a/core-io/src/test/java/com/couchbase/client/core/node/ViewLocatorTest.java b/core-io/src/test/java/com/couchbase/client/core/node/ViewLocatorTest.java index a036114b7..a18695ff8 100644 --- a/core-io/src/test/java/com/couchbase/client/core/node/ViewLocatorTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/node/ViewLocatorTest.java @@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import com.couchbase.client.core.topology.NodeIdentifier; class ViewLocatorTest { @@ -36,15 +37,19 @@ void dispatchesOnlyToHostsWithPrimaryPartitionsEnabled() { CouchbaseBucketConfig bucketConfig = mock(CouchbaseBucketConfig.class); ClusterConfig config = mock(ClusterConfig.class); when(config.bucketConfig("bucket")).thenReturn(bucketConfig); - when(bucketConfig.hasPrimaryPartitionsOnNode(new NodeIdentifier("1.2.3.4", 1234))).thenReturn(true); - when(bucketConfig.hasPrimaryPartitionsOnNode(new NodeIdentifier("1.2.3.5", 1234))).thenReturn(false); + + NodeIdentifier id1 = NodeIdentifier.forBootstrap("1.2.3.4", 
1234); + NodeIdentifier id2 = NodeIdentifier.forBootstrap("1.2.3.5", 1234); + + when(bucketConfig.hasPrimaryPartitionsOnNode(id1.toLegacy())).thenReturn(true); + when(bucketConfig.hasPrimaryPartitionsOnNode(id2.toLegacy())).thenReturn(false); Node node1 = mock(Node.class); - when(node1.identifier()).thenReturn(new NodeIdentifier("1.2.3.4", 1234)); + when(node1.identifier()).thenReturn(id1); assertTrue(locator.nodeCanBeUsed(node1, request, config)); Node node2 = mock(Node.class); - when(node2.identifier()).thenReturn(new NodeIdentifier("1.2.3.5", 1234)); + when(node2.identifier()).thenReturn(id2); assertFalse(locator.nodeCanBeUsed(node2, request, config)); } diff --git a/core-io/src/test/java/com/couchbase/client/core/retry/RetryOrchestratorTest.java b/core-io/src/test/java/com/couchbase/client/core/retry/RetryOrchestratorTest.java index 98520d055..4d448df1b 100644 --- a/core-io/src/test/java/com/couchbase/client/core/retry/RetryOrchestratorTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/retry/RetryOrchestratorTest.java @@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -80,7 +81,7 @@ void cancelIfNoMoreRetriesAllowed() { CoreEnvironment env = mock(CoreEnvironment.class); SimpleEventBus eventBus = new SimpleEventBus(true); when(env.eventBus()).thenReturn(eventBus); - CoreContext context = new CoreContext(mock(Core.class), 1, env, mock(Authenticator.class)); + CoreContext context = new CoreContext(mockCore(), 1, env, mock(Authenticator.class)); RetryOrchestrator.maybeRetry(context, request, RetryReason.UNKNOWN); verify(request, times(1)).cancel(CancellationReason.noMoreRetries(RetryReason.UNKNOWN), Function.identity()); diff --git a/core-io/src/test/java/com/couchbase/client/core/service/PooledServiceTest.java b/core-io/src/test/java/com/couchbase/client/core/service/PooledServiceTest.java index 914b22ad2..a3ae9938a 100644 --- a/core-io/src/test/java/com/couchbase/client/core/service/PooledServiceTest.java +++ b/core-io/src/test/java/com/couchbase/client/core/service/PooledServiceTest.java @@ -45,6 +45,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import static com.couchbase.client.core.util.MockUtil.mockCore; import static com.couchbase.client.test.Util.waitUntilCondition; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.atLeastOnce; @@ -71,7 +72,7 @@ class PooledServiceTest { void beforeEach() { eventBus = new SimpleEventBus(true, Collections.singletonList(ServiceStateChangedEvent.class)); environment = CoreEnvironment.builder().eventBus(eventBus).build(); - CoreContext coreContext = new CoreContext(mock(Core.class), 1, environment, authenticator); + CoreContext coreContext = new CoreContext(mockCore(), 1, environment, authenticator); serviceContext = new ServiceContext(coreContext, "127.0.0.1", 1234, ServiceType.KV, Optional.empty()); } @@ -836,4 +837,4 @@ public > Endpoint select(R r, List ports) { + return new HostAndServicePorts(host, ports, nodeId(host, ports.getOrDefault(ServiceType.MANAGER, 8091)), null, null); + } + + @Deprecated + public static NodeInfo nodeInfo(String host, Map ports) { + return new NodeInfo(host, ports, emptyMap(), null, nodeId(host, ports.getOrDefault(ServiceType.MANAGER, 
8091)).toLegacy()); + } + + public static ClusterTopology clusterTopology(List nodes) { + return ClusterTopology.of( + new TopologyRevision(1, 1), + null, + nodes, + emptySet(), + NetworkResolution.DEFAULT, + PortSelector.NON_TLS, + null // no bucket + ); + } + + public static TestTopologyParser topologyParser() { + return new TestTopologyParser(); + } + + public static class TestTopologyParser { + private String originHost = "127.0.0.1"; + private NetworkSelector networkSelector = NetworkSelector.DEFAULT; + private PortSelector portSelector = PortSelector.NON_TLS; + private MemcachedHashingStrategy memcachedHashingStrategy = StandardMemcachedHashingStrategy.INSTANCE; + + private TestTopologyParser() { + } + + private TestTopologyParser(String originHost, NetworkSelector networkSelector, PortSelector portSelector, MemcachedHashingStrategy memcachedHashingStrategy) { + this.originHost = requireNonNull(originHost); + this.networkSelector = requireNonNull(networkSelector); + this.portSelector = requireNonNull(portSelector); + this.memcachedHashingStrategy = requireNonNull(memcachedHashingStrategy); + } + + public TestTopologyParser originHost(String originHost) { + return new TestTopologyParser(originHost, networkSelector, portSelector, memcachedHashingStrategy); + } + + public TestTopologyParser networkSelector(NetworkSelector networkSelector) { + return new TestTopologyParser(originHost, networkSelector, portSelector, memcachedHashingStrategy); + } + + public TestTopologyParser portSelector(PortSelector portSelector) { + return new TestTopologyParser(originHost, networkSelector, portSelector, memcachedHashingStrategy); + } + + public TestTopologyParser memcachedHashingStrategy(MemcachedHashingStrategy memcachedHashingStrategy) { + return new TestTopologyParser(originHost, networkSelector, portSelector, memcachedHashingStrategy); + } + + public ClusterTopology parse(String json) { + return new TopologyParser(networkSelector, portSelector, memcachedHashingStrategy).parse(json, originHost); + } + + @Deprecated + public BucketConfig parseBucketConfig(String json) { + return LegacyConfigHelper.toLegacyBucketConfig(parse(json).requireBucket()); + } + + @Deprecated + public GlobalConfig parseGlobalConfig(String json) { + return new GlobalConfig(parse(json)); + } + } + +} + diff --git a/core-io/src/test/java/com/couchbase/client/core/util/MockUtil.java b/core-io/src/test/java/com/couchbase/client/core/util/MockUtil.java new file mode 100644 index 000000000..ee086636d --- /dev/null +++ b/core-io/src/test/java/com/couchbase/client/core/util/MockUtil.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
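Aside on the TestTopologyParser helper above: each configuration method (originHost, networkSelector, portSelector, memcachedHashingStrategy) returns a fresh instance rather than mutating the receiver, so a shared default parser can never be changed by one test and observed by another. Below is a small, dependency-free sketch of this immutable "wither" style; the field names are illustrative only and are not the SDK's.

// Hypothetical sketch of an immutable wither-style test helper.
public class WitherSketch {

  static final class ParserConfig {
    final String originHost;
    final boolean tls;

    private ParserConfig(String originHost, boolean tls) {
      this.originHost = originHost;
      this.tls = tls;
    }

    static ParserConfig defaults() {
      return new ParserConfig("127.0.0.1", false);
    }

    ParserConfig originHost(String newHost) {
      return new ParserConfig(newHost, tls); // copy, never mutate
    }

    ParserConfig tls(boolean newTls) {
      return new ParserConfig(originHost, newTls);
    }

    @Override public String toString() {
      return originHost + " tls=" + tls;
    }
  }

  public static void main(String[] args) {
    ParserConfig base = ParserConfig.defaults();
    ParserConfig custom = base.originHost("10.0.0.1").tls(true);
    System.out.println(base);   // 127.0.0.1 tls=false (unchanged)
    System.out.println(custom); // 10.0.0.1 tls=true
  }
}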
+ */ +package com.couchbase.client.core.util; + +import com.couchbase.client.core.Core; +import com.couchbase.client.core.CoreResources; +import com.couchbase.client.core.cnc.RequestTracer; +import com.couchbase.client.core.cnc.tracing.NoopRequestTracer; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MockUtil { + private MockUtil() {} + + public static Core mockCore() { + Core core = mock(Core.class); + CoreResources coreResources = new CoreResources() { + @Override + public RequestTracer requestTracer() { + return NoopRequestTracer.INSTANCE; + } + }; + when(core.coreResources()).thenReturn(coreResources); + return core; + } +} diff --git a/java-client/pom.xml b/java-client/pom.xml index fecef8846..4df936c12 100644 --- a/java-client/pom.xml +++ b/java-client/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 java-client - 3.7.3 + 3.7.6 Couchbase Java SDK diff --git a/java-client/src/integrationTest/java/com/couchbase/client/java/ReplicaReadIntegrationTest.java b/java-client/src/integrationTest/java/com/couchbase/client/java/ReplicaReadIntegrationTest.java index e0018f0d8..ff7a2bcf5 100644 --- a/java-client/src/integrationTest/java/com/couchbase/client/java/ReplicaReadIntegrationTest.java +++ b/java-client/src/integrationTest/java/com/couchbase/client/java/ReplicaReadIntegrationTest.java @@ -17,7 +17,7 @@ package com.couchbase.client.java; import com.couchbase.client.core.cnc.events.request.IndividualReplicaGetFailedEvent; -import com.couchbase.client.core.deps.com.google.common.collect.Sets; +import com.couchbase.client.core.config.CouchbaseBucketConfig; import com.couchbase.client.core.error.DocumentNotFoundException; import com.couchbase.client.core.error.DocumentUnretrievableException; import com.couchbase.client.core.error.UnambiguousTimeoutException; @@ -46,11 +46,14 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static com.couchbase.client.core.util.CbCollections.setCopyOf; import static com.couchbase.client.core.util.CbCollections.setOf; +import static com.couchbase.client.core.node.KeyValueLocator.partitionForKey; import static com.couchbase.client.core.util.CbCollections.transform; import static com.couchbase.client.test.Util.waitUntilCondition; +import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.emptyList; import static java.util.Collections.emptySet; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -101,7 +104,7 @@ void alwaysPassesWithAll() { collection.upsert(id, "Hello, World!"); List results = collection.getAllReplicas(id).collect(Collectors.toList()); - assertFalse(results.isEmpty()); + assertEquals(numAvailableCopies(id), results.size(), results::toString); for (GetResult result : results) { assertEquals("Hello, World!", result.contentAs(String.class)); assertFalse(result.expiryTime().isPresent()); @@ -181,8 +184,7 @@ void asyncGetAnyReturnsFailedFutureWhenNotFound() throws InterruptedException { void asyncGetAllReturnsListOfFailedFuturesWhenNotFound() throws Exception { List> futures = collection.async().getAllReplicas(absentId()).get(); - // one result for each replica, plus 1 for active - assertEquals(config().numReplicas() + 1, futures.size()); + assertEquals(numAvailableCopies(absentId()), futures.size(), futures::toString); List> errorClasses = transform(futures, future -> { ExecutionException e = 
assertThrows(ExecutionException.class, future::get); @@ -242,7 +244,7 @@ void reactiveGetAllReturnsResult() throws Exception { .block(); assertNotNull(results); - assertNotEquals(0, results.size()); + assertEquals(numAvailableCopies(absentId()), results.size(), results::toString); int primaryCount = 0; for (GetReplicaResult result : results) { @@ -378,4 +380,25 @@ void noMonoReturnsErrorIfEmpty() { assertThrows(NoSuchElementException.class, () -> flux3.next().block()); } + + /** + * Returns the number of available replicas + active associated with the + * given document ID. + */ + private static int numAvailableCopies(String key) { + CouchbaseBucketConfig bucket = (CouchbaseBucketConfig) cluster.core() + .configurationProvider() + .config() + .bucketConfig(config().bucketname()); + + int partition = partitionForKey(key.getBytes(UTF_8), bucket.numberOfPartitions()); + + return (int) IntStream.range(0, bucket.numberOfReplicas()) + .filter(replicaIndex -> + bucket.nodeIndexForReplica(partition, replicaIndex, false) >= 0 + ) + .count() + // Plus active + + 1; + } } diff --git a/java-client/src/integrationTest/java/com/couchbase/client/java/http/CouchbaseHttpClientIntegrationTest.java b/java-client/src/integrationTest/java/com/couchbase/client/java/http/CouchbaseHttpClientIntegrationTest.java index 4d4c2e213..10934e751 100644 --- a/java-client/src/integrationTest/java/com/couchbase/client/java/http/CouchbaseHttpClientIntegrationTest.java +++ b/java-client/src/integrationTest/java/com/couchbase/client/java/http/CouchbaseHttpClientIntegrationTest.java @@ -16,7 +16,7 @@ package com.couchbase.client.java.http; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.java.Cluster; import com.couchbase.client.java.util.JavaIntegrationTest; import com.couchbase.client.test.Capabilities; @@ -87,7 +87,7 @@ void canReadStatusCodeForBadRequestEventing() { NodeIdentifier firstNode() { TestNodeConfig n = config().nodes().get(0); - return new NodeIdentifier(n.hostname(), n.ports().get(Services.MANAGER)); + return NodeIdentifier.forBootstrap(n.hostname(), n.ports().get(Services.MANAGER)); } // On 6.0 and below, fails with: diff --git a/java-client/src/integrationTest/resources/integration.properties b/java-client/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/java-client/src/integrationTest/resources/integration.properties +++ b/java-client/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/java-client/src/main/java/com/couchbase/client/java/AsyncCluster.java b/java-client/src/main/java/com/couchbase/client/java/AsyncCluster.java index 2938f08cb..07b41058b 100644 --- a/java-client/src/main/java/com/couchbase/client/java/AsyncCluster.java +++ b/java-client/src/main/java/com/couchbase/client/java/AsyncCluster.java @@ -310,7 +310,7 @@ public AsyncAnalyticsIndexManager analyticsIndexes() { * Provides access to the N1QL index management services. 
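Context for the mockCore() usages throughout the core-io test changes and the coreResources() call sites below: request spans and tracers are now resolved through core.coreResources() rather than only through the environment, so a bare mock(Core.class) would return null from that accessor and blow up during test setup; MockUtil.mockCore() stubs exactly that path with a no-op tracer. The sketch below is a dependency-free illustration of the idea; all interface names in it are stand-ins, not the SDK types.

// Hypothetical sketch of why tests need a pre-stubbed Core mock.
public class MockCoreSketch {

  interface Tracer { void record(String op); }

  interface Resources { Tracer tracer(); }

  interface Core { Resources resources(); }

  // No-op tracer, analogous in spirit to NoopRequestTracer.INSTANCE.
  static final Tracer NOOP = op -> { /* intentionally does nothing */ };

  // Analogous in spirit to MockUtil.mockCore(): resources are always usable.
  static Core stubCore() {
    Resources resources = () -> NOOP;
    return () -> resources;
  }

  public static void main(String[] args) {
    Core core = stubCore();
    // Production-style call path: never null, so setup code cannot NPE here.
    core.resources().tracer().record("get");
    System.out.println("tracer resolved: " + (core.resources().tracer() != null));
  }
}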
*/ public AsyncQueryIndexManager queryIndexes() { - return new AsyncQueryIndexManager(couchbaseOps().queryOps(), couchbaseOps().environment().requestTracer(), this); + return new AsyncQueryIndexManager(couchbaseOps().queryOps(), couchbaseOps().coreResources().requestTracer(), this); } /** @@ -397,7 +397,8 @@ AnalyticsRequest analyticsRequest(final String statement, final AnalyticsOptions final byte[] queryBytes = query.toString().getBytes(StandardCharsets.UTF_8); final String clientContextId = query.getString("client_context_id"); - final RequestSpan span = environment() + final RequestSpan span = couchbaseOps() + .coreResources() .requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_ANALYTICS, opts.parentSpan().orElse(null)); AnalyticsRequest request = new AnalyticsRequest(timeout, core().context(), retryStrategy, authenticator, diff --git a/java-client/src/main/java/com/couchbase/client/java/AsyncCollection.java b/java-client/src/main/java/com/couchbase/client/java/AsyncCollection.java index cb2e63028..156623ad0 100644 --- a/java-client/src/main/java/com/couchbase/client/java/AsyncCollection.java +++ b/java-client/src/main/java/com/couchbase/client/java/AsyncCollection.java @@ -145,7 +145,7 @@ public class AsyncCollection { this.asyncBinaryCollection = new AsyncBinaryCollection(keyspace, couchbaseOps, cluster); this.kvOps = couchbaseOps.kvOps(keyspace); - this.queryIndexManager = new AsyncCollectionQueryIndexManager(couchbaseOps.queryOps(), couchbaseOps.environment().requestTracer(), keyspace); + this.queryIndexManager = new AsyncCollectionQueryIndexManager(couchbaseOps.queryOps(), couchbaseOps.coreResources().requestTracer(), keyspace); } /** @@ -356,6 +356,7 @@ public CompletableFuture>> getAllReplic opts.retryStrategy().orElse(environment().retryStrategy()), opts.clientContext(), opts.parentSpan().orElse(null), + opts.readPreference(), response -> GetReplicaResult.from(response, transcoder)); } @@ -390,6 +391,7 @@ public CompletableFuture getAnyReplica(final String id, final opts.retryStrategy().orElse(environment().retryStrategy()), opts.clientContext(), opts.parentSpan().orElse(null), + opts.readPreference(), response -> GetReplicaResult.from(response, transcoder)); } @@ -709,6 +711,7 @@ public CompletableFuture>> lookupI opts.retryStrategy().orElse(environment().retryStrategy()), opts.clientContext(), opts.parentSpan().orElse(null), + opts.readPreference(), response -> LookupInReplicaResult.from(response, serializer)); } @@ -748,6 +751,7 @@ public CompletableFuture lookupInAnyReplica(final String opts.retryStrategy().orElse(environment().retryStrategy()), opts.clientContext(), opts.parentSpan().orElse(null), + opts.readPreference(), response -> LookupInReplicaResult.from(response, serializer)); } diff --git a/java-client/src/main/java/com/couchbase/client/java/AsyncScope.java b/java-client/src/main/java/com/couchbase/client/java/AsyncScope.java index 33d40d2e0..e06d32e81 100644 --- a/java-client/src/main/java/com/couchbase/client/java/AsyncScope.java +++ b/java-client/src/main/java/com/couchbase/client/java/AsyncScope.java @@ -283,7 +283,7 @@ AnalyticsRequest analyticsRequest(final String statement, final AnalyticsOptions final byte[] queryBytes = query.toString().getBytes(StandardCharsets.UTF_8); final String clientContextId = query.getString("client_context_id"); - final RequestSpan span = environment() + final RequestSpan span = couchbaseOps.coreResources() .requestTracer() .requestSpan(TracingIdentifiers.SPAN_REQUEST_ANALYTICS, opts.parentSpan().orElse(null)); 
AnalyticsRequest request = new AnalyticsRequest(timeout, core().context(), retryStrategy, core().context().authenticator(), diff --git a/java-client/src/main/java/com/couchbase/client/java/Cluster.java b/java-client/src/main/java/com/couchbase/client/java/Cluster.java index 3fe2ec8c0..b9aebbd13 100644 --- a/java-client/src/main/java/com/couchbase/client/java/Cluster.java +++ b/java-client/src/main/java/com/couchbase/client/java/Cluster.java @@ -334,14 +334,14 @@ public CouchbaseHttpClient httpClient() { * The user manager allows to manage users and groups. */ public UserManager users() { - return new UserManager(asyncCluster.users()); + return new UserManager(environment(), asyncCluster.users()); } /** * The bucket manager allows to perform administrative tasks on buckets and their resources. */ public BucketManager buckets() { - return new BucketManager(asyncCluster.buckets()); + return new BucketManager(environment(), asyncCluster.buckets()); } /** @@ -355,7 +355,7 @@ public AnalyticsIndexManager analyticsIndexes() { * The query index manager allows to modify and create indexes for the query service. */ public QueryIndexManager queryIndexes() { - return new QueryIndexManager(asyncCluster.queryIndexes()); + return new QueryIndexManager(environment(), asyncCluster.queryIndexes()); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/ReactiveBinaryCollection.java b/java-client/src/main/java/com/couchbase/client/java/ReactiveBinaryCollection.java index f1eff7a3e..96c100d73 100644 --- a/java-client/src/main/java/com/couchbase/client/java/ReactiveBinaryCollection.java +++ b/java-client/src/main/java/com/couchbase/client/java/ReactiveBinaryCollection.java @@ -16,6 +16,7 @@ package com.couchbase.client.java; +import static com.couchbase.client.core.util.ReactorOps.proxyToPublishOnSuppliedScheduler; import static com.couchbase.client.core.util.Validators.notNull; import static com.couchbase.client.java.BinaryCollection.DEFAULT_APPEND_OPTIONS; import static com.couchbase.client.java.BinaryCollection.DEFAULT_DECREMENT_OPTIONS; @@ -25,9 +26,9 @@ import com.couchbase.client.core.io.CollectionIdentifier; import com.couchbase.client.core.util.PreventsGarbageCollection; +import com.couchbase.client.java.env.ClusterEnvironment; import reactor.core.publisher.Mono; -import com.couchbase.client.core.Core; import com.couchbase.client.core.api.kv.CoreKvBinaryOps; import com.couchbase.client.core.error.CasMismatchException; import com.couchbase.client.core.error.CouchbaseException; @@ -53,9 +54,13 @@ public class ReactiveBinaryCollection { @PreventsGarbageCollection private final AsyncBinaryCollection async; - ReactiveBinaryCollection(final AsyncBinaryCollection async) { + ReactiveBinaryCollection(final AsyncBinaryCollection async, final ClusterEnvironment env) { this.collectionIdentifier = async.collectionIdentifier(); - this.coreKvBinaryOps = async.coreKvBinaryOps; + this.coreKvBinaryOps = proxyToPublishOnSuppliedScheduler( + async.coreKvBinaryOps, + CoreKvBinaryOps.class, + env.userScheduler() + ); this.async = requireNonNull(async); } diff --git a/java-client/src/main/java/com/couchbase/client/java/ReactiveBucket.java b/java-client/src/main/java/com/couchbase/client/java/ReactiveBucket.java index d389ca323..7764092e4 100644 --- a/java-client/src/main/java/com/couchbase/client/java/ReactiveBucket.java +++ b/java-client/src/main/java/com/couchbase/client/java/ReactiveBucket.java @@ -93,11 +93,11 @@ public Core core() { } public ReactiveCollectionManager collections() { - return new 
ReactiveCollectionManager(asyncBucket.collections()); + return new ReactiveCollectionManager(environment(), asyncBucket.collections()); } public ReactiveViewIndexManager viewIndexes() { - return new ReactiveViewIndexManager(asyncBucket.viewIndexes()); + return new ReactiveViewIndexManager(environment(), asyncBucket.viewIndexes()); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/ReactiveCluster.java b/java-client/src/main/java/com/couchbase/client/java/ReactiveCluster.java index 6959bc5c7..ace1d1b0a 100644 --- a/java-client/src/main/java/com/couchbase/client/java/ReactiveCluster.java +++ b/java-client/src/main/java/com/couchbase/client/java/ReactiveCluster.java @@ -33,6 +33,7 @@ import com.couchbase.client.core.error.context.ReducedQueryErrorContext; import com.couchbase.client.core.error.context.ReducedSearchErrorContext; import com.couchbase.client.core.util.ConnectionString; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.analytics.AnalyticsAccessor; import com.couchbase.client.java.analytics.AnalyticsOptions; import com.couchbase.client.java.analytics.ReactiveAnalyticsResult; @@ -115,6 +116,8 @@ public class ReactiveCluster { */ private final AsyncCluster asyncCluster; + private final ReactorOps reactor; + /** * Stores already opened buckets for reuse. */ @@ -182,9 +185,9 @@ private ReactiveCluster( */ ReactiveCluster(final AsyncCluster asyncCluster) { this.asyncCluster = asyncCluster; + this.reactor = asyncCluster.environment(); } - /** * Provides access to the underlying {@link Core}. * @@ -200,21 +203,21 @@ public Core core() { */ @Stability.Volatile public ReactiveCouchbaseHttpClient httpClient() { - return new ReactiveCouchbaseHttpClient(asyncCluster.httpClient()); + return new ReactiveCouchbaseHttpClient(reactor, asyncCluster.httpClient()); } /** * Provides access to the user management services. */ public ReactiveUserManager users() { - return new ReactiveUserManager(asyncCluster.users()); + return new ReactiveUserManager(reactor, asyncCluster.users()); } /** * Provides access to the bucket management services. */ public ReactiveBucketManager buckets() { - return new ReactiveBucketManager(async().buckets()); + return new ReactiveBucketManager(reactor, async().buckets()); } /** @@ -228,14 +231,14 @@ public ReactiveAnalyticsIndexManager analyticsIndexes() { * Provides access to the search index management services. */ public ReactiveSearchIndexManager searchIndexes() { - return new ReactiveSearchIndexManager(async().searchIndexes()); + return new ReactiveSearchIndexManager(reactor, async().searchIndexes()); } /** * Provides access to the N1QL index management services. */ public ReactiveQueryIndexManager queryIndexes() { - return new ReactiveQueryIndexManager(async().queryIndexes()); + return new ReactiveQueryIndexManager(reactor, async().queryIndexes()); } /** @@ -243,7 +246,7 @@ public ReactiveQueryIndexManager queryIndexes() { */ @Stability.Uncommitted public ReactiveEventingFunctionManager eventingFunctions() { - return new ReactiveEventingFunctionManager(async().eventingFunctions()); + return new ReactiveEventingFunctionManager(reactor, async().eventingFunctions()); } /** @@ -281,7 +284,7 @@ public Mono query(final String statement, final QueryOption notNull(options, "QueryOptions", () -> new ReducedQueryErrorContext(statement)); final QueryOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? 
environment().jsonSerializer() : opts.serializer(); - return async().queryOps.queryReactive(statement, opts, null, null, QueryAccessor::convertCoreQueryError) + return reactor.publishOnUserScheduler(async().queryOps.queryReactive(statement, opts, null, null, QueryAccessor::convertCoreQueryError)) .map(result -> new ReactiveQueryResult(result, serializer)); } @@ -307,13 +310,13 @@ public Mono analyticsQuery(final String statement, fina notNull(options, "AnalyticsOptions", () -> new ReducedAnalyticsErrorContext(statement)); AnalyticsOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? environment().jsonSerializer() : opts.serializer(); - return Mono.defer(() -> { + return reactor.publishOnUserScheduler(Mono.defer(() -> { return AnalyticsAccessor.analyticsQueryReactive( asyncCluster.core(), asyncCluster.analyticsRequest(statement, opts), serializer ); - }); + })); } /** @@ -350,7 +353,7 @@ public Mono search(final String indexName, final SearchReq CoreSearchRequest coreRequest = searchRequest.toCore(); SearchOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? environment().jsonSerializer() : opts.serializer(); - return asyncCluster.searchOps.searchReactive(indexName, coreRequest, opts) + return reactor.publishOnUserScheduler(asyncCluster.searchOps.searchReactive(indexName, coreRequest, opts)) .map(r -> new ReactiveSearchResult(r, serializer)); } @@ -387,7 +390,7 @@ public Mono searchQuery(final String indexName, final Sear notNull(options, "SearchOptions", () -> new ReducedSearchErrorContext(indexName, coreQuery)); SearchOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? environment().jsonSerializer() : opts.serializer(); - return asyncCluster.searchOps.searchQueryReactive(indexName, coreQuery, opts) + return reactor.publishOnUserScheduler(asyncCluster.searchOps.searchQueryReactive(indexName, coreQuery, opts)) .map(result -> new ReactiveSearchResult(result, serializer)); } @@ -423,7 +426,7 @@ public Mono disconnect() { * @param timeout overriding the default disconnect timeout if needed. */ public Mono disconnect(final Duration timeout) { - return asyncCluster.disconnectInternal(timeout); + return reactor.publishOnUserScheduler(asyncCluster.disconnectInternal(timeout)); } /** @@ -450,7 +453,7 @@ public Mono diagnostics() { * @return the {@link DiagnosticsResult} once complete. */ public Mono diagnostics(final DiagnosticsOptions options) { - return Mono.defer(() -> Mono.fromFuture(asyncCluster.diagnostics(options))); + return reactor.publishOnUserScheduler(Mono.defer(() -> Mono.fromFuture(asyncCluster.diagnostics(options)))); } /** @@ -477,7 +480,7 @@ public Mono ping() { * @return the {@link PingResult} once complete. */ public Mono ping(final PingOptions options) { - return Mono.defer(() -> Mono.fromFuture(asyncCluster.ping(options))); + return reactor.publishOnUserScheduler(Mono.defer(() -> Mono.fromFuture(asyncCluster.ping(options)))); } /** @@ -506,7 +509,7 @@ public Mono waitUntilReady(final Duration timeout) { * @return a mono that completes either once ready or timeout. 
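The publishOnUserScheduler(...) wrapping introduced in ReactiveCluster and ReactiveScope above hops downstream signals onto a caller-facing scheduler, so user callbacks chained onto the returned Mono or Flux do not run on the SDK's internal threads. A minimal sketch of that technique follows; it assumes reactor-core on the classpath, and everything except the standard publishOn operator is an illustrative name.

// Hypothetical sketch of publishing results on a user-facing scheduler.
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class PublishOnSketch {

  // Mirrors the shape of a publishOnUserScheduler(Mono) helper: wrap the result
  // so downstream map/subscribe callbacks run on the supplied scheduler.
  static <T> Mono<T> publishOnUserScheduler(Mono<T> mono, Scheduler userScheduler) {
    return mono.publishOn(userScheduler);
  }

  public static void main(String[] args) {
    Scheduler userScheduler = Schedulers.newSingle("user-callbacks");
    try {
      String result = publishOnUserScheduler(Mono.just("pong"), userScheduler)
          .map(v -> Thread.currentThread().getName() + ": " + v)
          .block();
      // Typically prints a thread name like "user-callbacks-1", not an IO thread.
      System.out.println(result);
    } finally {
      userScheduler.dispose();
    }
  }
}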
*/ public Mono waitUntilReady(final Duration timeout, final WaitUntilReadyOptions options) { - return Mono.defer(() -> Mono.fromFuture(asyncCluster.waitUntilReady(timeout, options))); + return reactor.publishOnUserScheduler(Mono.defer(() -> Mono.fromFuture(asyncCluster.waitUntilReady(timeout, options)))); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/ReactiveCollection.java b/java-client/src/main/java/com/couchbase/client/java/ReactiveCollection.java index e6f3159bb..6a7d4e5f2 100644 --- a/java-client/src/main/java/com/couchbase/client/java/ReactiveCollection.java +++ b/java-client/src/main/java/com/couchbase/client/java/ReactiveCollection.java @@ -65,6 +65,7 @@ import java.util.Optional; import static com.couchbase.client.core.util.CbCollections.transform; +import static com.couchbase.client.core.util.ReactorOps.proxyToPublishOnSuppliedScheduler; import static com.couchbase.client.core.util.Validators.notNull; import static com.couchbase.client.java.kv.ExistsOptions.existsOptions; import static com.couchbase.client.java.kv.GetAllReplicasOptions.getAllReplicasOptions; @@ -135,9 +136,13 @@ public class ReactiveCollection { ReactiveCollection(final AsyncCollection asyncCollection) { this.asyncCollection = asyncCollection; - this.reactiveBinaryCollection = new ReactiveBinaryCollection(asyncCollection.binary()); - this.kvOps = asyncCollection.kvOps; - this.queryIndexManager = new ReactiveCollectionQueryIndexManager(asyncCollection.queryIndexes()); + this.reactiveBinaryCollection = new ReactiveBinaryCollection(asyncCollection.binary(), asyncCollection.environment()); + this.kvOps = proxyToPublishOnSuppliedScheduler( + asyncCollection.kvOps, + CoreKvOps.class, + asyncCollection.environment().userScheduler() + ); + this.queryIndexManager = new ReactiveCollectionQueryIndexManager(environment(), asyncCollection.queryIndexes()); } /** @@ -351,7 +356,7 @@ public Flux getAllReplicas(final String id, final GetAllReplic GetAllReplicasOptions.Built opts = options.build(); final Transcoder transcoder = Optional.ofNullable(opts.transcoder()).orElse(environment().transcoder()); - return kvOps.getAllReplicasReactive(opts, id) + return kvOps.getAllReplicasReactive(opts, id, opts.readPreference()) .map(response -> GetReplicaResult.from(response, transcoder)); } @@ -381,7 +386,7 @@ public Mono getAnyReplica(final String id, final GetAnyReplica GetAnyReplicaOptions.Built opts = options.build(); final Transcoder transcoder = Optional.ofNullable(opts.transcoder()).orElse(environment().transcoder()); - return kvOps.getAnyReplicaReactive(opts, id) + return kvOps.getAnyReplicaReactive(opts, id, opts.readPreference()) .map(response -> GetReplicaResult.from(response, transcoder)); } @@ -779,7 +784,8 @@ public Flux lookupInAllReplicas(String id, List LookupInReplicaResult.from(response, serializer)); + return kvOps.subdocGetAllReplicasReactive(opts, id, transform(lookupInSpecs, LookupInSpec::toCore), opts.readPreference()) + .map(response -> LookupInReplicaResult.from(response, serializer)); } /** @@ -810,7 +816,7 @@ public Mono lookupInAnyReplica(final String id, final Lis LookupInAnyReplicaOptions.Built opts = options.build(); final JsonSerializer serializer = Optional.ofNullable(opts.serializer()).orElse(environment().jsonSerializer()); - return kvOps.subdocGetAnyReplicaReactive(opts, id, transform(lookupInSpecs, LookupInSpec::toCore)) + return kvOps.subdocGetAnyReplicaReactive(opts, id, transform(lookupInSpecs, LookupInSpec::toCore), opts.readPreference()) .map(response -> 
LookupInReplicaResult.from(response, serializer)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/ReactiveScope.java b/java-client/src/main/java/com/couchbase/client/java/ReactiveScope.java index 01503874d..951a64e8b 100644 --- a/java-client/src/main/java/com/couchbase/client/java/ReactiveScope.java +++ b/java-client/src/main/java/com/couchbase/client/java/ReactiveScope.java @@ -27,6 +27,7 @@ import com.couchbase.client.core.error.context.ReducedQueryErrorContext; import com.couchbase.client.core.error.context.ReducedSearchErrorContext; import com.couchbase.client.core.io.CollectionIdentifier; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.analytics.AnalyticsAccessor; import com.couchbase.client.java.analytics.AnalyticsOptions; import com.couchbase.client.java.analytics.ReactiveAnalyticsResult; @@ -67,6 +68,8 @@ public class ReactiveScope { */ private final AsyncScope asyncScope; + private final ReactorOps reactor; + /** * Stores already opened collections for reuse. */ @@ -78,6 +81,7 @@ public class ReactiveScope { * @param asyncScope the underlying async scope. */ ReactiveScope(final AsyncScope asyncScope) { + this.reactor = asyncScope.environment(); this.asyncScope = asyncScope; } @@ -164,7 +168,7 @@ public Mono query(final String statement, final QueryOption notNull(options, "QueryOptions", () -> new ReducedQueryErrorContext(statement)); final QueryOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? environment().jsonSerializer() : opts.serializer(); - return async().queryOps.queryReactive(statement, opts, asyncScope.queryContext, null, QueryAccessor::convertCoreQueryError) + return reactor.publishOnUserScheduler(async().queryOps.queryReactive(statement, opts, asyncScope.queryContext, null, QueryAccessor::convertCoreQueryError)) .map(result -> new ReactiveQueryResult(result, serializer)); } @@ -190,13 +194,13 @@ public Mono analyticsQuery(final String statement, fina notNull(options, "AnalyticsOptions", () -> new ReducedAnalyticsErrorContext(statement)); AnalyticsOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? environment().jsonSerializer() : opts.serializer(); - return Mono.defer(() -> { + return reactor.publishOnUserScheduler(Mono.defer(() -> { return AnalyticsAccessor.analyticsQueryReactive( asyncScope.core(), asyncScope.analyticsRequest(statement, opts), serializer ); - }); + })); } /** @@ -235,7 +239,7 @@ public Mono search(final String indexName, final SearchReq CoreSearchRequest coreRequest = searchRequest.toCore(); SearchOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? environment().jsonSerializer() : opts.serializer(); - return asyncScope.searchOps.searchReactive(indexName, coreRequest, opts) + return reactor.publishOnUserScheduler(asyncScope.searchOps.searchReactive(indexName, coreRequest, opts)) .map(r -> new ReactiveSearchResult(r, serializer)); } @@ -272,7 +276,7 @@ public Mono searchQuery(final String indexName, final Sear notNull(options, "SearchOptions", () -> new ReducedSearchErrorContext(indexName, coreQuery)); SearchOptions.Built opts = options.build(); JsonSerializer serializer = opts.serializer() == null ? 
environment().jsonSerializer() : opts.serializer(); - return asyncScope.searchOps.searchQueryReactive(indexName, coreQuery, opts) + return reactor.publishOnUserScheduler(asyncScope.searchOps.searchQueryReactive(indexName, coreQuery, opts)) .map(result -> new ReactiveSearchResult(result, serializer)); } @@ -282,6 +286,6 @@ public Mono searchQuery(final String indexName, final Sear @Stability.Volatile @SinceCouchbase("7.1") public ReactiveScopeEventingFunctionManager eventingFunctions() { - return new ReactiveScopeEventingFunctionManager(asyncScope.eventingFunctions()); + return new ReactiveScopeEventingFunctionManager(environment(), asyncScope.eventingFunctions()); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/batch/ReactiveBatchHelper.java b/java-client/src/main/java/com/couchbase/client/java/batch/ReactiveBatchHelper.java index 6c9499568..bafde0fe9 100644 --- a/java-client/src/main/java/com/couchbase/client/java/batch/ReactiveBatchHelper.java +++ b/java-client/src/main/java/com/couchbase/client/java/batch/ReactiveBatchHelper.java @@ -28,7 +28,7 @@ import com.couchbase.client.core.msg.kv.MultiObserveViaCasResponse; import com.couchbase.client.core.msg.kv.ObserveViaCasResponse; import com.couchbase.client.core.node.KeyValueLocator; -import com.couchbase.client.core.node.NodeIdentifier; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.java.Collection; import com.couchbase.client.java.cnc.evnts.BatchHelperExistsCompletedEvent; import com.couchbase.client.java.kv.GetResult; @@ -124,7 +124,7 @@ private static Flux existsBytes(final Collection collection, final java. Map> nodeEntries = new HashMap<>(config.nodes().size()); for (NodeInfo node : config.nodes()) { - nodeEntries.put(node.identifier(), new HashMap<>(ids.size() / config.nodes().size())); + nodeEntries.put(node.id(), new HashMap<>(ids.size() / config.nodes().size())); } CouchbaseBucketConfig cbc = (CouchbaseBucketConfig) config; @@ -139,7 +139,7 @@ private static Flux existsBytes(final Collection collection, final java. int partitionId = KeyValueLocator.partitionForKey(encodedId, cbc.numberOfPartitions()); int nodeId = cbc.nodeIndexForActive(partitionId, false); NodeInfo nodeInfo = cbc.nodeAtIndex(nodeId); - nodeEntries.get(nodeInfo.identifier()).put(encodedId, (short) partitionId); + nodeEntries.get(nodeInfo.id()).put(encodedId, (short) partitionId); } List> responses = new ArrayList<>(nodeEntries.size()); @@ -166,7 +166,7 @@ private static Flux existsBytes(final Collection collection, final java. responses.add(Reactor.wrap(request, request.response(), true)); } - return Flux + return env.publishOnUserScheduler(Flux .merge(responses) .flatMap(response -> Flux.fromIterable(response.observed().keySet())) .onErrorMap(throwable -> { @@ -176,7 +176,7 @@ private static Flux existsBytes(final Collection collection, final java. 
.doOnComplete(() -> core.context().environment().eventBus().publish(new BatchHelperExistsCompletedEvent( Duration.ofNanos(System.nanoTime() - start), new BatchErrorContext(Collections.unmodifiableList(requests)) - ))); + )))); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/env/ClusterEnvironment.java b/java-client/src/main/java/com/couchbase/client/java/env/ClusterEnvironment.java index 12c1ee536..328c02726 100644 --- a/java-client/src/main/java/com/couchbase/client/java/env/ClusterEnvironment.java +++ b/java-client/src/main/java/com/couchbase/client/java/env/ClusterEnvironment.java @@ -17,6 +17,7 @@ package com.couchbase.client.java.env; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.annotation.UsedBy; import com.couchbase.client.core.encryption.CryptoManager; import com.couchbase.client.core.env.CoreEnvironment; import com.couchbase.client.core.env.VersionAndGitHash; @@ -34,6 +35,7 @@ import java.util.Optional; import java.util.function.Consumer; +import static com.couchbase.client.core.annotation.UsedBy.Project.QUARKUS_COUCHBASE; import static com.couchbase.client.core.util.CbObjects.defaultIfNull; import static com.couchbase.client.core.util.Validators.notNull; @@ -70,6 +72,7 @@ private ClusterEnvironment(Builder builder) { * Be very careful not to reference any classes from the optional Jackson library otherwise users will get * NoClassDefFoundError when Jackson is absent. */ + @UsedBy(QUARKUS_COUCHBASE) private JsonSerializer newDefaultSerializer(CryptoManager cryptoManager) { return nonShadowedJacksonPresent() ? JacksonJsonSerializer.create(cryptoManager) diff --git a/java-client/src/main/java/com/couchbase/client/java/http/HttpTarget.java b/java-client/src/main/java/com/couchbase/client/java/http/HttpTarget.java index 0aeaa0027..68ebcc429 100644 --- a/java-client/src/main/java/com/couchbase/client/java/http/HttpTarget.java +++ b/java-client/src/main/java/com/couchbase/client/java/http/HttpTarget.java @@ -18,10 +18,8 @@ import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.msg.RequestTarget; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.service.ServiceType; - -import static java.util.Objects.requireNonNull; +import com.couchbase.client.core.topology.NodeIdentifier; /** * Specifies which Couchbase service should receive the request. 
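The recurring theme of the java-client changes above and below is that the reactive wrappers stop calling Reactor.toMono(...) directly and instead route every result through ReactorOps.publishOnUserScheduler(...), which ClusterEnvironment now supplies, so that values are emitted on a user-facing scheduler instead of on SDK I/O threads; the KV path goes one step further and wraps the whole CoreKvOps interface via proxyToPublishOnSuppliedScheduler. Below is a minimal sketch of the idea, assuming illustrative names (UserSchedulerOps, userScheduler()); it is not the core-io implementation.

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

// Illustrative stand-in for core-io's ReactorOps; the names here are assumptions.
interface UserSchedulerOps {

  // The scheduler that user-visible signals should be published on.
  Scheduler userScheduler();

  // Wrap an existing Mono so downstream operators run on the user scheduler.
  default <T> Mono<T> publishOnUserScheduler(Mono<T> mono) {
    return mono.publishOn(userScheduler());
  }

  // Wrap a lazily created CompletableFuture; this mirrors supplier-style calls
  // such as publishOnUserScheduler(() -> async.dropBucket(name)) in this patch.
  default <T> Mono<T> publishOnUserScheduler(Supplier<CompletableFuture<T>> future) {
    return Mono.defer(() -> Mono.fromFuture(future.get())).publishOn(userScheduler());
  }

  // Same idea for Flux-returning operations such as getAllIndexes().
  default <T> Flux<T> publishOnUserScheduler(Flux<T> flux) {
    return flux.publishOn(userScheduler());
  }
}

class UserSchedulerExample {
  public static void main(String[] args) {
    // Emissions hop off the thread that completed the future onto the chosen scheduler.
    UserSchedulerOps ops = Schedulers::boundedElastic;
    String value = ops
        .publishOnUserScheduler(() -> CompletableFuture.completedFuture("hello"))
        .doOnNext(v -> System.out.println(Thread.currentThread().getName() + " received " + v))
        .block();
    System.out.println(value);
  }
}

The Supplier<CompletableFuture<T>> overload mirrors calls like reactor.publishOnUserScheduler(() -> async.get(target, path)) in the ReactiveCouchbaseHttpClient diff that follows.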
diff --git a/java-client/src/main/java/com/couchbase/client/java/http/ReactiveCouchbaseHttpClient.java b/java-client/src/main/java/com/couchbase/client/java/http/ReactiveCouchbaseHttpClient.java index 2462dc8a4..45b8b19f8 100644 --- a/java-client/src/main/java/com/couchbase/client/java/http/ReactiveCouchbaseHttpClient.java +++ b/java-client/src/main/java/com/couchbase/client/java/http/ReactiveCouchbaseHttpClient.java @@ -17,6 +17,7 @@ package com.couchbase.client.java.http; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveCluster; import reactor.core.publisher.Mono; @@ -32,10 +33,12 @@ * @see AsyncCouchbaseHttpClient */ public class ReactiveCouchbaseHttpClient { + private final ReactorOps reactor; private final AsyncCouchbaseHttpClient async; @Stability.Internal - public ReactiveCouchbaseHttpClient(AsyncCouchbaseHttpClient async) { + public ReactiveCouchbaseHttpClient(ReactorOps reactor, AsyncCouchbaseHttpClient async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } @@ -46,7 +49,7 @@ public ReactiveCouchbaseHttpClient(AsyncCouchbaseHttpClient async) { * or include the query string in the path. */ public Mono get(HttpTarget target, HttpPath path) { - return toMono(() -> async.get(target, path)); + return reactor.publishOnUserScheduler(() -> async.get(target, path)); } /** @@ -59,7 +62,7 @@ public Mono get(HttpTarget target, HttpPath path) { * */ public Mono get(HttpTarget target, HttpPath path, HttpGetOptions options) { - return toMono(() -> async.get(target, path, options)); + return reactor.publishOnUserScheduler(() -> async.get(target, path, options)); } /** @@ -68,7 +71,7 @@ public Mono get(HttpTarget target, HttpPath path, HttpGetOptions o * To specify a request body, use the overload that takes {@link HttpPostOptions}. */ public Mono post(HttpTarget target, HttpPath path) { - return toMono(() -> async.post(target, path)); + return reactor.publishOnUserScheduler(() -> async.post(target, path)); } /** @@ -86,7 +89,7 @@ public Mono post(HttpTarget target, HttpPath path) { * */ public Mono post(HttpTarget target, HttpPath path, HttpPostOptions options) { - return toMono(() -> async.post(target, path, options)); + return reactor.publishOnUserScheduler(() -> async.post(target, path, options)); } /** @@ -95,7 +98,7 @@ public Mono post(HttpTarget target, HttpPath path, HttpPostOptions * To specify a request body, use the overload that takes {@link HttpPutOptions}. */ public Mono put(HttpTarget target, HttpPath path) { - return toMono(() -> async.put(target, path)); + return reactor.publishOnUserScheduler(() -> async.put(target, path)); } /** @@ -113,20 +116,20 @@ public Mono put(HttpTarget target, HttpPath path) { * */ public Mono put(HttpTarget target, HttpPath path, HttpPutOptions options) { - return toMono(() -> async.put(target, path, options)); + return reactor.publishOnUserScheduler(() -> async.put(target, path, options)); } /** * Returns a Mono that, when subscribed, issues a DELETE request with default options. */ public Mono delete(HttpTarget target, HttpPath path) { - return toMono(() -> async.delete(target, path)); + return reactor.publishOnUserScheduler(() -> async.delete(target, path)); } /** * Returns a Mono that, when subscribed, issues a DELETE request with given options. 
*/ public Mono delete(HttpTarget target, HttpPath path, HttpDeleteOptions options) { - return toMono(() -> async.delete(target, path, options)); + return reactor.publishOnUserScheduler(() -> async.delete(target, path, options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/kv/GetAllReplicasOptions.java b/java-client/src/main/java/com/couchbase/client/java/kv/GetAllReplicasOptions.java index a0992a8af..0105cbf78 100644 --- a/java-client/src/main/java/com/couchbase/client/java/kv/GetAllReplicasOptions.java +++ b/java-client/src/main/java/com/couchbase/client/java/kv/GetAllReplicasOptions.java @@ -17,9 +17,11 @@ package com.couchbase.client.java.kv; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreReadPreference; import com.couchbase.client.java.CommonOptions; import com.couchbase.client.java.codec.Transcoder; import com.couchbase.client.java.json.JsonObject; +import reactor.util.annotation.Nullable; import static com.couchbase.client.core.util.Validators.notNull; @@ -29,6 +31,7 @@ public class GetAllReplicasOptions extends CommonOptions * Holds the transcoder used for decoding. */ private Transcoder transcoder; + private CoreReadPreference readPreference; /** * Creates a new set of {@link GetAllReplicasOptions}. @@ -54,6 +57,17 @@ public GetAllReplicasOptions transcoder(final Transcoder transcoder) { return this; } + /** + * Set a read preference for this operation. + * + * @see ReadPreference + * @return this to allow method chaining. + */ + public GetAllReplicasOptions readPreference(ReadPreference readPreference) { + this.readPreference = readPreference.toCore(); + return this; + } + @Stability.Internal public Built build() { return new Built(); @@ -67,6 +81,9 @@ public Transcoder transcoder() { return transcoder; } + public @Nullable CoreReadPreference readPreference() { + return readPreference; + } } } diff --git a/java-client/src/main/java/com/couchbase/client/java/kv/GetAnyReplicaOptions.java b/java-client/src/main/java/com/couchbase/client/java/kv/GetAnyReplicaOptions.java index b5d26c04a..8cb9610f9 100644 --- a/java-client/src/main/java/com/couchbase/client/java/kv/GetAnyReplicaOptions.java +++ b/java-client/src/main/java/com/couchbase/client/java/kv/GetAnyReplicaOptions.java @@ -17,8 +17,10 @@ package com.couchbase.client.java.kv; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreReadPreference; import com.couchbase.client.java.CommonOptions; import com.couchbase.client.java.codec.Transcoder; +import reactor.util.annotation.Nullable; import static com.couchbase.client.core.util.Validators.notNull; @@ -29,6 +31,8 @@ public class GetAnyReplicaOptions extends CommonOptions { */ private Transcoder transcoder; + private CoreReadPreference readPreference; + /** * Creates a new set of {@link GetAnyReplicaOptions}. * @@ -45,7 +49,7 @@ private GetAnyReplicaOptions() { * Allows to specify a custom transcoder that is used to decode the content of the result. * * @param transcoder the custom transcoder that should be used for decoding. - * @return the {@link GetOptions} to allow method chaining. + * @return this to allow method chaining. */ public GetAnyReplicaOptions transcoder(final Transcoder transcoder) { notNull(transcoder, "Transcoder"); @@ -53,6 +57,17 @@ public GetAnyReplicaOptions transcoder(final Transcoder transcoder) { return this; } + /** + * Set a read preference for this operation. + * + * @see ReadPreference + * @return this to allow method chaining. 
+ */ + public GetAnyReplicaOptions readPreference(ReadPreference readPreference) { + this.readPreference = readPreference.toCore(); + return this; + } + @Stability.Internal public Built build() { return new Built(); @@ -66,6 +81,9 @@ public Transcoder transcoder() { return transcoder; } + public @Nullable CoreReadPreference readPreference() { + return readPreference; + } } } diff --git a/java-client/src/main/java/com/couchbase/client/java/kv/LookupInAllReplicasOptions.java b/java-client/src/main/java/com/couchbase/client/java/kv/LookupInAllReplicasOptions.java index 9f1cf7d58..409e8b4d4 100644 --- a/java-client/src/main/java/com/couchbase/client/java/kv/LookupInAllReplicasOptions.java +++ b/java-client/src/main/java/com/couchbase/client/java/kv/LookupInAllReplicasOptions.java @@ -17,8 +17,10 @@ package com.couchbase.client.java.kv; import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.core.api.kv.CoreReadPreference; import com.couchbase.client.java.CommonOptions; import com.couchbase.client.java.codec.JsonSerializer; +import reactor.util.annotation.Nullable; import static com.couchbase.client.core.util.Validators.notNull; @@ -27,6 +29,7 @@ public class LookupInAllReplicasOptions extends CommonOptions fields) { private CompletableFuture exec(final String statement, final CommonOptions.BuiltCommonOptions options, final String spanName) { - RequestSpan parent = CbTracing.newSpan( - cluster.environment().requestTracer(), spanName, options.parentSpan().orElse(null) - ); + RequestSpan parent = core.coreResources().requestTracer().requestSpan(spanName, options.parentSpan().orElse(null)); + final AnalyticsOptions analyticsOptions = toAnalyticsOptions(options).parentSpan(parent); return cluster diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/analytics/ReactiveAnalyticsIndexManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/analytics/ReactiveAnalyticsIndexManager.java index 6cd2064f8..43f4bf720 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/analytics/ReactiveAnalyticsIndexManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/analytics/ReactiveAnalyticsIndexManager.java @@ -29,6 +29,7 @@ import com.couchbase.client.core.error.InvalidArgumentException; import com.couchbase.client.core.error.LinkExistsException; import com.couchbase.client.core.error.LinkNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.AsyncCluster; import com.couchbase.client.java.ReactiveCluster; import com.couchbase.client.java.manager.analytics.link.AnalyticsLink; @@ -40,7 +41,6 @@ import java.util.Map; import static com.couchbase.client.core.Reactor.toFlux; -import static com.couchbase.client.core.Reactor.toMono; /** * Performs management operations on analytics indexes. @@ -48,6 +48,7 @@ public class ReactiveAnalyticsIndexManager { private final AsyncAnalyticsIndexManager async; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveAnalyticsIndexManager}. @@ -58,7 +59,10 @@ public class ReactiveAnalyticsIndexManager { * @param cluster the async cluster to perform the analytics queries on. 
*/ @Stability.Internal - public ReactiveAnalyticsIndexManager(final AsyncCluster cluster) { + public ReactiveAnalyticsIndexManager( + final AsyncCluster cluster + ) { + this.reactor = cluster.environment(); this.async = new AsyncAnalyticsIndexManager(cluster); } @@ -80,7 +84,7 @@ public AsyncAnalyticsIndexManager async() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createDataverse(final String dataverseName) { - return toMono(() -> async.createDataverse(dataverseName)); + return reactor.publishOnUserScheduler(() -> async.createDataverse(dataverseName)); } /** @@ -93,7 +97,7 @@ public Mono createDataverse(final String dataverseName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createDataverse(String dataverseName, CreateDataverseAnalyticsOptions options) { - return toMono(() -> async.createDataverse(dataverseName, options)); + return reactor.publishOnUserScheduler(() -> async.createDataverse(dataverseName, options)); } /** @@ -106,7 +110,7 @@ public Mono createDataverse(String dataverseName, CreateDataverseAnalytics * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropDataverse(String dataverseName) { - return toMono(() -> async.dropDataverse(dataverseName)); + return reactor.publishOnUserScheduler(() -> async.dropDataverse(dataverseName)); } /** @@ -120,7 +124,7 @@ public Mono dropDataverse(String dataverseName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropDataverse(String dataverseName, DropDataverseAnalyticsOptions options) { - return toMono(() -> async.dropDataverse(dataverseName, options)); + return reactor.publishOnUserScheduler(() -> async.dropDataverse(dataverseName, options)); } /** @@ -131,7 +135,7 @@ public Mono dropDataverse(String dataverseName, DropDataverseAnalyticsOpti */ @Stability.Uncommitted public Flux getAllDataverses() { - return toFlux(async::getAllDataverses); + return reactor.publishOnUserScheduler(toFlux(async::getAllDataverses)); } /** @@ -143,7 +147,7 @@ public Flux getAllDataverses() { */ @Stability.Uncommitted public Flux getAllDataverses(GetAllDataversesAnalyticsOptions options) { - return toFlux(() -> async.getAllDataverses(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllDataverses(options))); } /** @@ -156,7 +160,7 @@ public Flux getAllDataverses(GetAllDataversesAnalyticsOption * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createDataset(String datasetName, String bucketName) { - return toMono(() -> async.createDataset(datasetName, bucketName)); + return reactor.publishOnUserScheduler(() -> async.createDataset(datasetName, bucketName)); } /** @@ -170,7 +174,7 @@ public Mono createDataset(String datasetName, String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createDataset(String datasetName, String bucketName, CreateDatasetAnalyticsOptions options) { - return toMono(() -> async.createDataset(datasetName, bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.createDataset(datasetName, bucketName, options)); } /** @@ -182,7 +186,7 @@ public Mono createDataset(String datasetName, String bucketName, CreateDat * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono dropDataset(String datasetName) { - return toMono(() -> async.dropDataset(datasetName)); + return reactor.publishOnUserScheduler(() -> async.dropDataset(datasetName)); } /** @@ -195,7 +199,7 @@ public Mono dropDataset(String datasetName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropDataset(String datasetName, DropDatasetAnalyticsOptions options) { - return toMono(() -> async.dropDataset(datasetName, options)); + return reactor.publishOnUserScheduler(() -> async.dropDataset(datasetName, options)); } /** @@ -205,7 +209,7 @@ public Mono dropDataset(String datasetName, DropDatasetAnalyticsOptions op * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getAllDatasets() { - return toFlux(async::getAllDatasets); + return reactor.publishOnUserScheduler(toFlux(async::getAllDatasets)); } /** @@ -216,7 +220,7 @@ public Flux getAllDatasets() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getAllDatasets(GetAllDatasetsAnalyticsOptions options) { - return toFlux(() -> async.getAllDatasets(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllDatasets(options))); } /** @@ -232,7 +236,7 @@ public Flux getAllDatasets(GetAllDatasetsAnalyticsOptions opti * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createIndex(String indexName, String datasetName, Map fields) { - return toMono(() -> async.createIndex(indexName, datasetName, fields)); + return reactor.publishOnUserScheduler(() -> async.createIndex(indexName, datasetName, fields)); } /** @@ -249,7 +253,7 @@ public Mono createIndex(String indexName, String datasetName, Map createIndex(String indexName, String datasetName, Map fields, CreateIndexAnalyticsOptions options) { - return toMono(() -> async.createIndex(indexName, datasetName, fields, options)); + return reactor.publishOnUserScheduler(() -> async.createIndex(indexName, datasetName, fields, options)); } /** @@ -264,7 +268,7 @@ public Mono createIndex(String indexName, String datasetName, Map dropIndex(String indexName, String datasetName) { - return toMono(() -> async.dropIndex(indexName, datasetName)); + return reactor.publishOnUserScheduler(() -> async.dropIndex(indexName, datasetName)); } /** @@ -280,7 +284,7 @@ public Mono dropIndex(String indexName, String datasetName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropIndex(String indexName, String datasetName, DropIndexAnalyticsOptions options) { - return toMono(() -> async.dropIndex(indexName, datasetName, options)); + return reactor.publishOnUserScheduler(() -> async.dropIndex(indexName, datasetName, options)); } /** @@ -290,7 +294,7 @@ public Mono dropIndex(String indexName, String datasetName, DropIndexAnaly * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getAllIndexes() { - return toFlux(async::getAllIndexes); + return reactor.publishOnUserScheduler(toFlux(async::getAllIndexes)); } /** @@ -301,7 +305,7 @@ public Flux getAllIndexes() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Flux getAllIndexes(GetAllIndexesAnalyticsOptions options) { - return toFlux(() -> async.getAllIndexes(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllIndexes(options))); } /** @@ -311,7 +315,7 @@ public Flux getAllIndexes(GetAllIndexesAnalyticsOptions options) * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono connectLink() { - return toMono(async::connectLink); + return reactor.publishOnUserScheduler(async::connectLink); } /** @@ -324,7 +328,7 @@ public Mono connectLink() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono connectLink(ConnectLinkAnalyticsOptions options) { - return toMono(() -> async.connectLink(options)); + return reactor.publishOnUserScheduler(() -> async.connectLink(options)); } /** @@ -334,7 +338,7 @@ public Mono connectLink(ConnectLinkAnalyticsOptions options) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono disconnectLink() { - return toMono(async::disconnectLink); + return reactor.publishOnUserScheduler(async::disconnectLink); } /** @@ -347,7 +351,7 @@ public Mono disconnectLink() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono disconnectLink(DisconnectLinkAnalyticsOptions options) { - return toMono(() -> async.disconnectLink(options)); + return reactor.publishOnUserScheduler(() -> async.disconnectLink(options)); } /** @@ -357,7 +361,7 @@ public Mono disconnectLink(DisconnectLinkAnalyticsOptions options) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono>> getPendingMutations() { - return toMono(async::getPendingMutations); + return reactor.publishOnUserScheduler(async::getPendingMutations); } /** @@ -368,7 +372,7 @@ public Mono>> getPendingMutations() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono>> getPendingMutations(final GetPendingMutationsAnalyticsOptions options) { - return toMono(() -> async.getPendingMutations(options)); + return reactor.publishOnUserScheduler(() -> async.getPendingMutations(options)); } /** @@ -382,7 +386,7 @@ public Mono>> getPendingMutations(final GetPending * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createLink(AnalyticsLink link) { - return toMono(() -> async.createLink(link)); + return reactor.publishOnUserScheduler(() -> async.createLink(link)); } /** @@ -397,7 +401,7 @@ public Mono createLink(AnalyticsLink link) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createLink(AnalyticsLink link, CreateLinkAnalyticsOptions options) { - return toMono(() -> async.createLink(link, options)); + return reactor.publishOnUserScheduler(() -> async.createLink(link, options)); } /** @@ -411,7 +415,7 @@ public Mono createLink(AnalyticsLink link, CreateLinkAnalyticsOptions opti * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono replaceLink(AnalyticsLink link) { - return toMono(() -> async.replaceLink(link)); + return reactor.publishOnUserScheduler(() -> async.replaceLink(link)); } /** @@ -426,7 +430,7 @@ public Mono replaceLink(AnalyticsLink link) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono replaceLink(AnalyticsLink link, ReplaceLinkAnalyticsOptions options) { - return toMono(() -> async.replaceLink(link, options)); + return reactor.publishOnUserScheduler(() -> async.replaceLink(link, options)); } /** @@ -440,7 +444,7 @@ public Mono replaceLink(AnalyticsLink link, ReplaceLinkAnalyticsOptions op * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropLink(String linkName, String dataverse) { - return toMono(() -> async.dropLink(linkName, dataverse)); + return reactor.publishOnUserScheduler(() -> async.dropLink(linkName, dataverse)); } /** @@ -455,7 +459,7 @@ public Mono dropLink(String linkName, String dataverse) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropLink(String linkName, String dataverse, DropLinkAnalyticsOptions options) { - return toMono(() -> async.dropLink(linkName, dataverse, options)); + return reactor.publishOnUserScheduler(() -> async.dropLink(linkName, dataverse, options)); } /** @@ -472,7 +476,7 @@ public Mono dropLink(String linkName, String dataverse, DropLinkAnalyticsO * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getLinks() { - return toFlux(async::getLinks); + return reactor.publishOnUserScheduler(toFlux(async::getLinks)); } /** @@ -490,7 +494,7 @@ public Flux getLinks() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getLinks(GetLinksAnalyticsOptions options) { - return toFlux(() -> async.getLinks(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getLinks(options))); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/bucket/BucketManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/bucket/BucketManager.java index d7daf8de2..bb4d58868 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/bucket/BucketManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/bucket/BucketManager.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.error.BucketNotFlushableException; import com.couchbase.client.core.error.BucketNotFoundException; import com.couchbase.client.core.error.CouchbaseException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.Cluster; import java.util.Map; @@ -56,9 +57,9 @@ public class BucketManager { * @param async the underlying async manager that performs the ops. 
*/ @Stability.Internal - public BucketManager(final AsyncBucketManager async) { + public BucketManager(final ReactorOps reactor, final AsyncBucketManager async) { this.async = requireNonNull(async); - this.reactive = new ReactiveBucketManager(async); + this.reactive = new ReactiveBucketManager(reactor, async); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/bucket/ReactiveBucketManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/bucket/ReactiveBucketManager.java index bec47388b..a56895fad 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/bucket/ReactiveBucketManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/bucket/ReactiveBucketManager.java @@ -21,12 +21,12 @@ import com.couchbase.client.core.error.BucketNotFlushableException; import com.couchbase.client.core.error.BucketNotFoundException; import com.couchbase.client.core.error.CouchbaseException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveCluster; import reactor.core.publisher.Mono; import java.util.Map; -import static com.couchbase.client.core.Reactor.toMono; import static java.util.Objects.requireNonNull; /** @@ -42,6 +42,7 @@ public class ReactiveBucketManager { * Holds the underlying async bucket manager. */ private final AsyncBucketManager async; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveBucketManager}. @@ -52,7 +53,8 @@ public class ReactiveBucketManager { * @param async the underlying async manager that performs the ops. */ @Stability.Internal - public ReactiveBucketManager(final AsyncBucketManager async) { + public ReactiveBucketManager(final ReactorOps reactor, final AsyncBucketManager async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } @@ -76,7 +78,7 @@ public AsyncBucketManager async() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createBucket(final BucketSettings settings) { - return toMono(() -> async.createBucket(settings)); + return reactor.publishOnUserScheduler(() -> async.createBucket(settings)); } /** @@ -93,7 +95,7 @@ public Mono createBucket(final BucketSettings settings) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createBucket(final BucketSettings settings, final CreateBucketOptions options) { - return toMono(() -> async.createBucket(settings, options)); + return reactor.publishOnUserScheduler(() -> async.createBucket(settings, options)); } /** @@ -119,7 +121,7 @@ public Mono createBucket(final BucketSettings settings, final CreateBucket * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono updateBucket(final BucketSettings settings) { - return toMono(() -> async.updateBucket(settings)); + return reactor.publishOnUserScheduler(() -> async.updateBucket(settings)); } /** @@ -146,7 +148,7 @@ public Mono updateBucket(final BucketSettings settings) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono updateBucket(final BucketSettings settings, final UpdateBucketOptions options) { - return toMono(() -> async.updateBucket(settings, options)); + return reactor.publishOnUserScheduler(() -> async.updateBucket(settings, options)); } /** @@ -158,7 +160,7 @@ public Mono updateBucket(final BucketSettings settings, final UpdateBucket * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropBucket(final String bucketName) { - return toMono(() -> async.dropBucket(bucketName)); + return reactor.publishOnUserScheduler(() -> async.dropBucket(bucketName)); } /** @@ -171,7 +173,7 @@ public Mono dropBucket(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropBucket(final String bucketName, final DropBucketOptions options) { - return toMono(() -> async.dropBucket(bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.dropBucket(bucketName, options)); } /** @@ -183,7 +185,7 @@ public Mono dropBucket(final String bucketName, final DropBucketOptions op * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono getBucket(final String bucketName) { - return toMono(() -> async.getBucket(bucketName)); + return reactor.publishOnUserScheduler(() -> async.getBucket(bucketName)); } /** @@ -196,7 +198,7 @@ public Mono getBucket(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono getBucket(final String bucketName, final GetBucketOptions options) { - return toMono(() -> async.getBucket(bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.getBucket(bucketName, options)); } /** @@ -206,7 +208,7 @@ public Mono getBucket(final String bucketName, final GetBucketOp * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono> getAllBuckets() { - return toMono(async::getAllBuckets); + return reactor.publishOnUserScheduler(async::getAllBuckets); } /** @@ -217,7 +219,7 @@ public Mono> getAllBuckets() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono> getAllBuckets(final GetAllBucketOptions options) { - return toMono(() -> async.getAllBuckets(options)); + return reactor.publishOnUserScheduler(() -> async.getAllBuckets(options)); } /** @@ -237,7 +239,7 @@ public Mono> getAllBuckets(final GetAllBucketOptions * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono flushBucket(final String bucketName) { - return toMono(() -> async.flushBucket(bucketName)); + return reactor.publishOnUserScheduler(() -> async.flushBucket(bucketName)); } /** @@ -258,7 +260,7 @@ public Mono flushBucket(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono flushBucket(final String bucketName, final FlushBucketOptions options) { - return toMono(() -> async.flushBucket(bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.flushBucket(bucketName, options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/collection/ReactiveCollectionManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/collection/ReactiveCollectionManager.java index 6f19827ad..242d9891d 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/collection/ReactiveCollectionManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/collection/ReactiveCollectionManager.java @@ -22,19 +22,20 @@ import com.couchbase.client.core.error.CouchbaseException; import com.couchbase.client.core.error.ScopeExistsException; import com.couchbase.client.core.error.ScopeNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveBucket; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.util.concurrent.CompletableFuture; -import static com.couchbase.client.core.Reactor.toMono; import static com.couchbase.client.java.manager.collection.CreateCollectionOptions.createCollectionOptions; import static com.couchbase.client.java.manager.collection.CreateScopeOptions.createScopeOptions; import static com.couchbase.client.java.manager.collection.DropCollectionOptions.dropCollectionOptions; import static com.couchbase.client.java.manager.collection.DropScopeOptions.dropScopeOptions; import static com.couchbase.client.java.manager.collection.GetAllScopesOptions.getAllScopesOptions; import static com.couchbase.client.java.manager.collection.GetScopeOptions.getScopeOptions; +import static java.util.Objects.requireNonNull; /** * The {@link ReactiveCollectionManager} provides APIs to manage collections and scopes within a bucket. @@ -46,6 +47,7 @@ public class ReactiveCollectionManager { * The underlying async collection manager. */ private final AsyncCollectionManager async; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveCollectionManager}. @@ -56,8 +58,9 @@ public class ReactiveCollectionManager { * @param async the underlying async collection manager. 
*/ @Stability.Internal - public ReactiveCollectionManager(final AsyncCollectionManager async) { - this.async = async; + public ReactiveCollectionManager(final ReactorOps reactor, final AsyncCollectionManager async) { + this.reactor = requireNonNull(reactor); + this.async = requireNonNull(async); } /** @@ -94,7 +97,7 @@ public Mono createCollection(final CollectionSpec collectionSpec) { */ @Deprecated public Mono createCollection(final CollectionSpec collectionSpec, final CreateCollectionOptions options) { - return toMono(() -> async.createCollection(collectionSpec, options)); + return reactor.publishOnUserScheduler(() -> async.createCollection(collectionSpec, options)); } /** @@ -112,7 +115,7 @@ public Mono createCollection(final CollectionSpec collectionSpec, final Cr */ @Stability.Volatile public Mono createCollection(final String scopeName, final String collectionName, final CreateCollectionSettings settings) { - return toMono(() -> async.createCollection(scopeName, collectionName, settings)); + return reactor.publishOnUserScheduler(() -> async.createCollection(scopeName, collectionName, settings)); } /** @@ -131,7 +134,7 @@ public Mono createCollection(final String scopeName, final String collecti */ @Stability.Volatile public Mono createCollection(final String scopeName, final String collectionName, final CreateCollectionSettings settings, final CreateCollectionOptions options) { - return toMono(() -> async.createCollection(scopeName, collectionName, settings, options)); + return reactor.publishOnUserScheduler(() -> async.createCollection(scopeName, collectionName, settings, options)); } /** @@ -156,7 +159,7 @@ public Mono createScope(final String scopeName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono createScope(final String scopeName, final CreateScopeOptions options) { - return toMono(() -> async.createScope(scopeName, options)); + return reactor.publishOnUserScheduler(() -> async.createScope(scopeName, options)); } /** @@ -172,7 +175,7 @@ public Mono createScope(final String scopeName, final CreateScopeOptions o */ @Stability.Volatile public Mono updateCollection(String scopeName, String collectionName, UpdateCollectionSettings settings) { - return toMono(() -> async.updateCollection(scopeName, collectionName, settings)); + return reactor.publishOnUserScheduler(() -> async.updateCollection(scopeName, collectionName, settings)); } /** @@ -189,7 +192,7 @@ public Mono updateCollection(String scopeName, String collectionName, Upda */ @Stability.Volatile public Mono updateCollection(String scopeName, String collectionName, UpdateCollectionSettings settings, UpdateCollectionOptions options) { - return toMono(() -> async.updateCollection(scopeName, collectionName, settings, options)); + return reactor.publishOnUserScheduler(() -> async.updateCollection(scopeName, collectionName, settings, options)); } /** @@ -220,7 +223,7 @@ public Mono dropCollection(final CollectionSpec collectionSpec) { */ @Deprecated public Mono dropCollection(final CollectionSpec collectionSpec, final DropCollectionOptions options) { - return toMono(() -> async.dropCollection(collectionSpec, options)); + return reactor.publishOnUserScheduler(() -> async.dropCollection(collectionSpec, options)); } /** @@ -251,7 +254,7 @@ public Mono dropCollection(final String scopeName, final String collection */ @Stability.Volatile public Mono dropCollection(final String scopeName, final String collectionName, final DropCollectionOptions options) { - return toMono(() -> async.dropCollection(scopeName, collectionName, options)); + return reactor.publishOnUserScheduler(() -> async.dropCollection(scopeName, collectionName, options)); } /** @@ -276,7 +279,7 @@ public Mono dropScope(final String scopeName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropScope(final String scopeName, final DropScopeOptions options) { - return toMono(() -> async.dropScope(scopeName, options)); + return reactor.publishOnUserScheduler(() -> async.dropScope(scopeName, options)); } /** @@ -305,7 +308,7 @@ public Mono getScope(final String scopeName) { */ @Deprecated public Mono getScope(final String scopeName, final GetScopeOptions options) { - return toMono(() -> async.getScope(scopeName, options)); + return reactor.publishOnUserScheduler(() -> async.getScope(scopeName, options)); } /** @@ -326,7 +329,7 @@ public Flux getAllScopes() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Flux getAllScopes(final GetAllScopesOptions options) { - return toMono(() -> async.getAllScopes(options)).flatMapMany(Flux::fromIterable); + return reactor.publishOnUserScheduler(() -> async.getAllScopes(options)).flatMapMany(Flux::fromIterable); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveEventingFunctionManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveEventingFunctionManager.java index c3d44e459..6439c4a34 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveEventingFunctionManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveEventingFunctionManager.java @@ -16,7 +16,6 @@ package com.couchbase.client.java.manager.eventing; -import com.couchbase.client.core.Reactor; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.error.BucketNotFoundException; import com.couchbase.client.core.error.CollectionNotFoundException; @@ -27,11 +26,13 @@ import com.couchbase.client.core.error.EventingFunctionNotBootstrappedException; import com.couchbase.client.core.error.EventingFunctionNotDeployedException; import com.couchbase.client.core.error.EventingFunctionNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveCluster; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import static com.couchbase.client.java.manager.eventing.GetAllFunctionsOptions.getAllFunctionsOptions; +import static java.util.Objects.requireNonNull; /** * Performs management operations on {@link EventingFunction EventingFunctions}. @@ -43,6 +44,7 @@ public class ReactiveEventingFunctionManager { * The underlying async function manager which performs the actual ops and does the conversions. */ private final AsyncEventingFunctionManager asyncManager; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveEventingFunctionManager}. @@ -53,8 +55,9 @@ public class ReactiveEventingFunctionManager { * @param asyncManager the underlying async manager that performs the ops. */ @Stability.Internal - public ReactiveEventingFunctionManager(AsyncEventingFunctionManager asyncManager) { - this.asyncManager = asyncManager; + public ReactiveEventingFunctionManager(ReactorOps reactor, AsyncEventingFunctionManager asyncManager) { + this.reactor = requireNonNull(reactor); + this.asyncManager = requireNonNull(asyncManager); } /** @@ -99,7 +102,7 @@ public Mono upsertFunction(final EventingFunction function) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono upsertFunction(final EventingFunction function, final UpsertFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.upsertFunction(function, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.upsertFunction(function, options)); } /** @@ -124,7 +127,7 @@ public Mono getFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono getFunction(final String name, final GetFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.getFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.getFunction(name, options)); } /** @@ -149,7 +152,7 @@ public Flux getAllFunctions() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Flux getAllFunctions(final GetAllFunctionsOptions options) { - return Reactor.toMono(() -> asyncManager.getAllFunctions(options)).flatMapMany(Flux::fromIterable); + return reactor.publishOnUserScheduler(() -> asyncManager.getAllFunctions(options)).flatMapMany(Flux::fromIterable); } /** @@ -186,7 +189,7 @@ public Mono dropFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropFunction(final String name, final DropFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.dropFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.dropFunction(name, options)); } /** @@ -219,7 +222,7 @@ public Mono deployFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono deployFunction(final String name, final DeployFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.deployFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.deployFunction(name, options)); } /** @@ -260,7 +263,7 @@ public Mono undeployFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono undeployFunction(final String name, final UndeployFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.undeployFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.undeployFunction(name, options)); } /** @@ -293,7 +296,7 @@ public Mono pauseFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono pauseFunction(final String name, final PauseFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.pauseFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.pauseFunction(name, options)); } /** @@ -334,7 +337,7 @@ public Mono resumeFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono resumeFunction(final String name, final ResumeFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.resumeFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.resumeFunction(name, options)); } /** @@ -355,7 +358,7 @@ public Mono functionsStatus() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono functionsStatus(final FunctionsStatusOptions options) { - return Reactor.toMono(() -> asyncManager.functionsStatus(options)); + return reactor.publishOnUserScheduler(() -> asyncManager.functionsStatus(options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveScopeEventingFunctionManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveScopeEventingFunctionManager.java index adfc446b8..32cb9cafe 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveScopeEventingFunctionManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/eventing/ReactiveScopeEventingFunctionManager.java @@ -16,7 +16,6 @@ package com.couchbase.client.java.manager.eventing; -import com.couchbase.client.core.Reactor; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.error.BucketNotFoundException; import com.couchbase.client.core.error.CollectionNotFoundException; @@ -27,11 +26,13 @@ import com.couchbase.client.core.error.EventingFunctionNotBootstrappedException; import com.couchbase.client.core.error.EventingFunctionNotDeployedException; import com.couchbase.client.core.error.EventingFunctionNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveCluster; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import static com.couchbase.client.java.manager.eventing.GetAllFunctionsOptions.getAllFunctionsOptions; +import static java.util.Objects.requireNonNull; /** * Performs management operations on {@link EventingFunction EventingFunctions}. @@ -43,6 +44,7 @@ public class ReactiveScopeEventingFunctionManager { * The underlying async function manager which performs the actual ops and does the conversions. */ private final AsyncScopeEventingFunctionManager asyncManager; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveScopeEventingFunctionManager}. @@ -53,8 +55,9 @@ public class ReactiveScopeEventingFunctionManager { * @param asyncManager the underlying async manager that performs the ops. */ @Stability.Internal - public ReactiveScopeEventingFunctionManager(AsyncScopeEventingFunctionManager asyncManager) { - this.asyncManager = asyncManager; + public ReactiveScopeEventingFunctionManager(ReactorOps reactor, AsyncScopeEventingFunctionManager asyncManager) { + this.reactor = requireNonNull(reactor); + this.asyncManager = requireNonNull(asyncManager); } /** @@ -99,7 +102,7 @@ public Mono upsertFunction(final EventingFunction function) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono upsertFunction(final EventingFunction function, final UpsertFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.upsertFunction(function, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.upsertFunction(function, options)); } /** @@ -124,7 +127,7 @@ public Mono getFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono getFunction(final String name, final GetFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.getFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.getFunction(name, options)); } /** @@ -149,7 +152,7 @@ public Flux getAllFunctions() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Flux getAllFunctions(final GetAllFunctionsOptions options) { - return Reactor.toMono(() -> asyncManager.getAllFunctions(options)).flatMapMany(Flux::fromIterable); + return reactor.publishOnUserScheduler(() -> asyncManager.getAllFunctions(options)).flatMapMany(Flux::fromIterable); } /** @@ -186,7 +189,7 @@ public Mono dropFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropFunction(final String name, final DropFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.dropFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.dropFunction(name, options)); } /** @@ -219,7 +222,7 @@ public Mono deployFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono deployFunction(final String name, final DeployFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.deployFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.deployFunction(name, options)); } /** @@ -260,7 +263,7 @@ public Mono undeployFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono undeployFunction(final String name, final UndeployFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.undeployFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.undeployFunction(name, options)); } /** @@ -293,7 +296,7 @@ public Mono pauseFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono pauseFunction(final String name, final PauseFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.pauseFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.pauseFunction(name, options)); } /** @@ -334,7 +337,7 @@ public Mono resumeFunction(final String name) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono resumeFunction(final String name, final ResumeFunctionOptions options) { - return Reactor.toMono(() -> asyncManager.resumeFunction(name, options)); + return reactor.publishOnUserScheduler(() -> asyncManager.resumeFunction(name, options)); } /** @@ -355,7 +358,7 @@ public Mono functionsStatus() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono functionsStatus(final FunctionsStatusOptions options) { - return Reactor.toMono(() -> asyncManager.functionsStatus(options)); + return reactor.publishOnUserScheduler(() -> asyncManager.functionsStatus(options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/query/QueryIndexManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/query/QueryIndexManager.java index c5e1c5cbc..86d59ebc3 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/query/QueryIndexManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/query/QueryIndexManager.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.error.IndexExistsException; import com.couchbase.client.core.error.IndexFailureException; import com.couchbase.client.core.error.IndexNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.Cluster; import java.time.Duration; @@ -54,9 +55,9 @@ public class QueryIndexManager { * @param async the async index manager. */ @Stability.Internal - public QueryIndexManager(final AsyncQueryIndexManager async) { + public QueryIndexManager(final ReactorOps reactor, final AsyncQueryIndexManager async) { this.async = requireNonNull(async); - this.reactive = new ReactiveQueryIndexManager(async); + this.reactive = new ReactiveQueryIndexManager(reactor, async); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveCollectionQueryIndexManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveCollectionQueryIndexManager.java index 2e646a813..f17a7d64b 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveCollectionQueryIndexManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveCollectionQueryIndexManager.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.error.IndexExistsException; import com.couchbase.client.core.error.IndexFailureException; import com.couchbase.client.core.error.IndexNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveCluster; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -29,7 +30,6 @@ import java.util.Collection; import static com.couchbase.client.core.Reactor.toFlux; -import static com.couchbase.client.core.Reactor.toMono; import static java.util.Objects.requireNonNull; /** @@ -42,6 +42,7 @@ public class ReactiveCollectionQueryIndexManager { * The underlying async query index manager which performs the actual ops and does the conversions. */ private final AsyncCollectionQueryIndexManager async; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveCollectionQueryIndexManager}. @@ -52,7 +53,8 @@ public class ReactiveCollectionQueryIndexManager { * @param async the async index manager. */ @Stability.Internal - public ReactiveCollectionQueryIndexManager(final AsyncCollectionQueryIndexManager async) { + public ReactiveCollectionQueryIndexManager(final ReactorOps reactor, final AsyncCollectionQueryIndexManager async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } @@ -66,7 +68,7 @@ public ReactiveCollectionQueryIndexManager(final AsyncCollectionQueryIndexManage * @throws CouchbaseException if any other generic unhandled/unexpected errors. 
*/ public Mono createIndex(final String indexName, final Collection fields) { - return toMono(() -> async.createIndex(indexName, fields)); + return reactor.publishOnUserScheduler(() -> async.createIndex(indexName, fields)); } /** @@ -81,7 +83,7 @@ public Mono createIndex(final String indexName, final Collection f */ public Mono createIndex(final String indexName, final Collection fields, final CreateQueryIndexOptions options) { - return toMono(() -> async.createIndex(indexName, fields, options)); + return reactor.publishOnUserScheduler(() -> async.createIndex(indexName, fields, options)); } /** @@ -92,7 +94,7 @@ public Mono createIndex(final String indexName, final Collection f * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono createPrimaryIndex() { - return toMono(() -> async.createPrimaryIndex()); + return reactor.publishOnUserScheduler(() -> async.createPrimaryIndex()); } /** @@ -104,7 +106,7 @@ public Mono createPrimaryIndex() { * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono createPrimaryIndex(final CreatePrimaryQueryIndexOptions options) { - return toMono(() -> async.createPrimaryIndex(options)); + return reactor.publishOnUserScheduler(() -> async.createPrimaryIndex(options)); } /** @@ -114,7 +116,7 @@ public Mono createPrimaryIndex(final CreatePrimaryQueryIndexOptions option * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Flux getAllIndexes() { - return toFlux(() -> async.getAllIndexes()); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllIndexes())); } /** @@ -125,7 +127,7 @@ public Flux getAllIndexes() { * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Flux getAllIndexes(final GetAllQueryIndexesOptions options) { - return toFlux(() -> async.getAllIndexes(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllIndexes(options))); } /** @@ -136,7 +138,7 @@ public Flux getAllIndexes(final GetAllQueryIndexesOptions options) { * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono dropPrimaryIndex() { - return toMono(() -> async.dropPrimaryIndex()); + return reactor.publishOnUserScheduler(() -> async.dropPrimaryIndex()); } /** @@ -147,7 +149,7 @@ public Mono dropPrimaryIndex() { * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono dropPrimaryIndex(final DropPrimaryQueryIndexOptions options) { - return toMono(() -> async.dropPrimaryIndex(options)); + return reactor.publishOnUserScheduler(() -> async.dropPrimaryIndex(options)); } /** @@ -159,7 +161,7 @@ public Mono dropPrimaryIndex(final DropPrimaryQueryIndexOptions options) { * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono dropIndex(final String indexName) { - return toMono(() -> async.dropIndex(indexName)); + return reactor.publishOnUserScheduler(() -> async.dropIndex(indexName)); } /** @@ -172,7 +174,7 @@ public Mono dropIndex(final String indexName) { * @throws CouchbaseException if any other generic unhandled/unexpected errors. 
*/ public Mono dropIndex(final String indexName, final DropQueryIndexOptions options) { - return toMono(() -> async.dropIndex(indexName, options)); + return reactor.publishOnUserScheduler(() -> async.dropIndex(indexName, options)); } /** @@ -181,7 +183,7 @@ public Mono dropIndex(final String indexName, final DropQueryIndexOptions * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono buildDeferredIndexes() { - return toMono(() -> async.buildDeferredIndexes()); + return reactor.publishOnUserScheduler(() -> async.buildDeferredIndexes()); } /** @@ -191,7 +193,7 @@ public Mono buildDeferredIndexes() { * @throws CouchbaseException if any other generic unhandled/unexpected errors. */ public Mono buildDeferredIndexes(final BuildQueryIndexOptions options) { - return toMono(() -> async.buildDeferredIndexes(options)); + return reactor.publishOnUserScheduler(() -> async.buildDeferredIndexes(options)); } /** @@ -203,7 +205,7 @@ public Mono buildDeferredIndexes(final BuildQueryIndexOptions options) { */ public Mono watchIndexes(final Collection indexNames, final Duration timeout) { - return toMono(() -> async.watchIndexes(indexNames, timeout)); + return reactor.publishOnUserScheduler(() -> async.watchIndexes(indexNames, timeout)); } /** @@ -216,6 +218,6 @@ public Mono watchIndexes(final Collection indexNames, */ public Mono watchIndexes(final Collection indexNames, final Duration timeout, final WatchQueryIndexesOptions options) { - return toMono(() -> async.watchIndexes(indexNames, timeout, options)); + return reactor.publishOnUserScheduler(() -> async.watchIndexes(indexNames, timeout, options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveQueryIndexManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveQueryIndexManager.java index 7c67b4001..740544486 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveQueryIndexManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/query/ReactiveQueryIndexManager.java @@ -21,6 +21,7 @@ import com.couchbase.client.core.error.IndexExistsException; import com.couchbase.client.core.error.IndexFailureException; import com.couchbase.client.core.error.IndexNotFoundException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.ReactiveCluster; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -29,7 +30,6 @@ import java.util.Collection; import static com.couchbase.client.core.Reactor.toFlux; -import static com.couchbase.client.core.Reactor.toMono; import static java.util.Objects.requireNonNull; /** @@ -41,6 +41,7 @@ public class ReactiveQueryIndexManager { * The underlying async query index manager which performs the actual ops and does the conversions. */ private final AsyncQueryIndexManager async; + private final ReactorOps reactor; /** * Creates a new {@link ReactiveQueryIndexManager}. @@ -51,7 +52,8 @@ public class ReactiveQueryIndexManager { * @param async the async index manager. */ @Stability.Internal - public ReactiveQueryIndexManager(final AsyncQueryIndexManager async) { + public ReactiveQueryIndexManager(final ReactorOps reactor, final AsyncQueryIndexManager async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } @@ -78,7 +80,7 @@ public AsyncQueryIndexManager async() { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
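How an application opts in is shown near the end of this diff, where the FIT performer calls clusterEnvironment.publishOnScheduler(...) behind an [if:3.7.5] guard. A configuration sketch along those lines; the publishOnScheduler builder method and its Supplier argument are inferred from that performer change rather than from documented API, so treat the exact signature as an assumption:

import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.ClusterOptions;
import com.couchbase.client.java.env.ClusterEnvironment;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class PublishOnSchedulerExample {
  public static void main(String[] args) {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    Scheduler scheduler = Schedulers.fromExecutorService(executor);

    // Reactive results from this environment's cluster should then be delivered on "scheduler".
    ClusterEnvironment env = ClusterEnvironment.builder()
        .publishOnScheduler(() -> scheduler)
        .build();

    Cluster cluster = Cluster.connect(
        "couchbase://127.0.0.1",
        ClusterOptions.clusterOptions("Administrator", "password").environment(env));

    // ... use cluster.reactive() here ...

    cluster.disconnect();
    env.shutdown();
    scheduler.dispose();
    executor.shutdown();
  }
}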
*/ public Mono createIndex(final String bucketName, final String indexName, final Collection fields) { - return toMono(() -> async.createIndex(bucketName, indexName, fields)); + return reactor.publishOnUserScheduler(() -> async.createIndex(bucketName, indexName, fields)); } /** @@ -99,7 +101,7 @@ public Mono createIndex(final String bucketName, final String indexName, f */ public Mono createIndex(final String bucketName, final String indexName, final Collection fields, final CreateQueryIndexOptions options) { - return toMono(() -> async.createIndex(bucketName, indexName, fields, options)); + return reactor.publishOnUserScheduler(() -> async.createIndex(bucketName, indexName, fields, options)); } /** @@ -116,7 +118,7 @@ public Mono createIndex(final String bucketName, final String indexName, f * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createPrimaryIndex(final String bucketName) { - return toMono(() -> async.createPrimaryIndex(bucketName)); + return reactor.publishOnUserScheduler(() -> async.createPrimaryIndex(bucketName)); } /** @@ -134,7 +136,7 @@ public Mono createPrimaryIndex(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono createPrimaryIndex(final String bucketName, final CreatePrimaryQueryIndexOptions options) { - return toMono(() -> async.createPrimaryIndex(bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.createPrimaryIndex(bucketName, options)); } /** @@ -150,7 +152,7 @@ public Mono createPrimaryIndex(final String bucketName, final CreatePrimar * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getAllIndexes(final String bucketName) { - return toFlux(() -> async.getAllIndexes(bucketName)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllIndexes(bucketName))); } /** @@ -167,7 +169,7 @@ public Flux getAllIndexes(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Flux getAllIndexes(final String bucketName, final GetAllQueryIndexesOptions options) { - return toFlux(() -> async.getAllIndexes(bucketName, options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllIndexes(bucketName, options))); } /** @@ -184,7 +186,7 @@ public Flux getAllIndexes(final String bucketName, final GetAllQuery * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropPrimaryIndex(final String bucketName) { - return toMono(() -> async.dropPrimaryIndex(bucketName)); + return reactor.publishOnUserScheduler(() -> async.dropPrimaryIndex(bucketName)); } /** @@ -202,7 +204,7 @@ public Mono dropPrimaryIndex(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropPrimaryIndex(final String bucketName, final DropPrimaryQueryIndexOptions options) { - return toMono(() -> async.dropPrimaryIndex(bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.dropPrimaryIndex(bucketName, options)); } /** @@ -220,7 +222,7 @@ public Mono dropPrimaryIndex(final String bucketName, final DropPrimaryQue * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. 
*/ public Mono dropIndex(final String bucketName, final String indexName) { - return toMono(() -> async.dropIndex(bucketName, indexName)); + return reactor.publishOnUserScheduler(() -> async.dropIndex(bucketName, indexName)); } /** @@ -239,7 +241,7 @@ public Mono dropIndex(final String bucketName, final String indexName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono dropIndex(final String bucketName, final String indexName, final DropQueryIndexOptions options) { - return toMono(() -> async.dropIndex(bucketName, indexName, options)); + return reactor.publishOnUserScheduler(() -> async.dropIndex(bucketName, indexName, options)); } /** @@ -252,7 +254,7 @@ public Mono dropIndex(final String bucketName, final String indexName, fin * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono buildDeferredIndexes(final String bucketName) { - return toMono(() -> async.buildDeferredIndexes(bucketName)); + return reactor.publishOnUserScheduler(() -> async.buildDeferredIndexes(bucketName)); } /** @@ -269,7 +271,7 @@ public Mono buildDeferredIndexes(final String bucketName) { * @throws CouchbaseException (async) if any other generic unhandled/unexpected errors. */ public Mono buildDeferredIndexes(final String bucketName, final BuildQueryIndexOptions options) { - return toMono(() -> async.buildDeferredIndexes(bucketName, options)); + return reactor.publishOnUserScheduler(() -> async.buildDeferredIndexes(bucketName, options)); } /** @@ -287,7 +289,7 @@ public Mono buildDeferredIndexes(final String bucketName, final BuildQuery */ public Mono watchIndexes(final String bucketName, final Collection indexNames, final Duration timeout) { - return toMono(() -> async.watchIndexes(bucketName, indexNames, timeout)); + return reactor.publishOnUserScheduler(() -> async.watchIndexes(bucketName, indexNames, timeout)); } /** @@ -306,7 +308,7 @@ public Mono watchIndexes(final String bucketName, final Collection */ public Mono watchIndexes(final String bucketName, final Collection indexNames, final Duration timeout, final WatchQueryIndexesOptions options) { - return toMono(() -> async.watchIndexes(bucketName, indexNames, timeout, options)); + return reactor.publishOnUserScheduler(() -> async.watchIndexes(bucketName, indexNames, timeout, options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/search/ReactiveSearchIndexManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/search/ReactiveSearchIndexManager.java index bf1a09748..4abb07b7c 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/search/ReactiveSearchIndexManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/search/ReactiveSearchIndexManager.java @@ -16,11 +16,12 @@ package com.couchbase.client.java.manager.search; -import com.couchbase.client.core.Reactor; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.json.JsonObject; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; +import static com.couchbase.client.core.Reactor.toFlux; import static com.couchbase.client.java.manager.search.AllowQueryingSearchIndexOptions.allowQueryingSearchIndexOptions; import static com.couchbase.client.java.manager.search.AnalyzeDocumentOptions.analyzeDocumentOptions; import static com.couchbase.client.java.manager.search.DisallowQueryingSearchIndexOptions.disallowQueryingSearchIndexOptions; @@ -33,6 +34,7 @@ import static 
com.couchbase.client.java.manager.search.ResumeIngestSearchIndexOptions.resumeIngestSearchIndexOptions; import static com.couchbase.client.java.manager.search.UnfreezePlanSearchIndexOptions.unfreezePlanSearchIndexOptions; import static com.couchbase.client.java.manager.search.UpsertSearchIndexOptions.upsertSearchIndexOptions; +import static java.util.Objects.requireNonNull; /** * The {@link ReactiveSearchIndexManager} allows to manage search index structures in a couchbase cluster. @@ -42,9 +44,11 @@ public class ReactiveSearchIndexManager { private final AsyncSearchIndexManager asyncIndexManager; + private final ReactorOps reactor; - public ReactiveSearchIndexManager(final AsyncSearchIndexManager asyncIndexManager) { - this.asyncIndexManager = asyncIndexManager; + public ReactiveSearchIndexManager(final ReactorOps reactor, final AsyncSearchIndexManager asyncIndexManager) { + this.reactor = requireNonNull(reactor); + this.asyncIndexManager = requireNonNull(asyncIndexManager); } /** @@ -54,7 +58,7 @@ public ReactiveSearchIndexManager(final AsyncSearchIndexManager asyncIndexManage * @return the index definition if it exists. */ public Mono getIndex(final String name, final GetSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.getIndex(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.getIndex(name, options)); } /** @@ -63,7 +67,7 @@ public Mono getIndex(final String name, final GetSearchIndexOptions * @return all currently present indexes. */ public Flux getAllIndexes(final GetAllSearchIndexesOptions options) { - return Reactor.toFlux(() -> asyncIndexManager.getAllIndexes(options)); + return reactor.publishOnUserScheduler(toFlux(() -> asyncIndexManager.getAllIndexes(options))); } /** @@ -73,7 +77,7 @@ public Flux getAllIndexes(final GetAllSearchIndexesOptions options) * @return the number of indexed documents. */ public Mono getIndexedDocumentsCount(final String name, final GetIndexedSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.getIndexedDocumentsCount(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.getIndexedDocumentsCount(name, options)); } /** @@ -84,7 +88,7 @@ public Mono getIndexedDocumentsCount(final String name, final GetIndexedSe * @return the analyzed sections for the document. */ public Flux analyzeDocument(final String name, final JsonObject document, final AnalyzeDocumentOptions options) { - return Reactor.toFlux(() -> asyncIndexManager.analyzeDocument(name, document, options)); + return reactor.publishOnUserScheduler(toFlux(() -> asyncIndexManager.analyzeDocument(name, document, options))); } /** @@ -93,7 +97,7 @@ public Flux analyzeDocument(final String name, final JsonObject docu * @param index the index definition including name and settings. */ public Mono upsertIndex(final SearchIndex index, final UpsertSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.upsertIndex(index, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.upsertIndex(index, options)); } /** @@ -102,7 +106,7 @@ public Mono upsertIndex(final SearchIndex index, final UpsertSearchIndexOp * @param name the name of the search index. 
*/ public Mono dropIndex(final String name, final DropSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.dropIndex(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.dropIndex(name, options)); } /** @@ -111,7 +115,7 @@ public Mono dropIndex(final String name, final DropSearchIndexOptions opti * @param name the name of the search index. */ public Mono pauseIngest(final String name, final PauseIngestSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.pauseIngest(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.pauseIngest(name, options)); } /** @@ -120,7 +124,7 @@ public Mono pauseIngest(final String name, final PauseIngestSearchIndexOpt * @param name the name of the search index. */ public Mono resumeIngest(final String name, ResumeIngestSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.resumeIngest(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.resumeIngest(name, options)); } /** @@ -129,7 +133,7 @@ public Mono resumeIngest(final String name, ResumeIngestSearchIndexOptions * @param name the name of the search index. */ public Mono allowQuerying(final String name, AllowQueryingSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.allowQuerying(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.allowQuerying(name, options)); } /** @@ -138,7 +142,7 @@ public Mono allowQuerying(final String name, AllowQueryingSearchIndexOptio * @param name the name of the search index. */ public Mono disallowQuerying(final String name, final DisallowQueryingSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.disallowQuerying(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.disallowQuerying(name, options)); } /** @@ -147,7 +151,7 @@ public Mono disallowQuerying(final String name, final DisallowQueryingSear * @param name the name of the search index. */ public Mono freezePlan(final String name, final FreezePlanSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.freezePlan(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.freezePlan(name, options)); } /** @@ -156,7 +160,7 @@ public Mono freezePlan(final String name, final FreezePlanSearchIndexOptio * @param name the name of the search index. 
*/ public Mono unfreezePlan(final String name, final UnfreezePlanSearchIndexOptions options) { - return Reactor.toMono(() -> asyncIndexManager.unfreezePlan(name, options)); + return reactor.publishOnUserScheduler(() -> asyncIndexManager.unfreezePlan(name, options)); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/user/ReactiveUserManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/user/ReactiveUserManager.java index 6a9da406d..d59ee1e43 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/user/ReactiveUserManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/user/ReactiveUserManager.java @@ -16,42 +16,45 @@ package com.couchbase.client.java.manager.user; -import com.couchbase.client.core.Reactor; +import com.couchbase.client.core.util.ReactorOps; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; +import static com.couchbase.client.core.Reactor.toFlux; import static java.util.Objects.requireNonNull; public class ReactiveUserManager { private final AsyncUserManager async; + private final ReactorOps reactor; - public ReactiveUserManager(AsyncUserManager async) { + public ReactiveUserManager(ReactorOps reactor, AsyncUserManager async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } public Mono getUser(AuthDomain domain, String username) { - return Reactor.toMono(() -> async.getUser(domain, username)); + return reactor.publishOnUserScheduler(() -> async.getUser(domain, username)); } public Mono getUser(AuthDomain domain, String username, GetUserOptions options) { - return Reactor.toMono(() -> async.getUser(domain, username, options)); + return reactor.publishOnUserScheduler(() -> async.getUser(domain, username, options)); } public Flux getAllUsers() { - return Reactor.toFlux(() -> async.getAllUsers()); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllUsers())); } public Flux getAllUsers(GetAllUsersOptions options) { - return Reactor.toFlux(() -> async.getAllUsers(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllUsers(options))); } public Flux getRoles() { - return Reactor.toFlux(() -> async.getRoles()); + return reactor.publishOnUserScheduler(toFlux(() -> async.getRoles())); } public Flux getRoles(GetRolesOptions options) { - return Reactor.toFlux(() -> async.getRoles(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getRoles(options))); } /** @@ -62,7 +65,7 @@ public Flux getRoles(GetRolesOptions options) { * @param options Common options (timeout, retry...) */ public Mono changePassword(String newPassword, ChangePasswordOptions options) { - return Reactor.toMono(() -> async.changePassword(newPassword, options)); + return reactor.publishOnUserScheduler(() -> async.changePassword(newPassword, options)); } /** * Changes the password of the currently authenticated user. @@ -70,53 +73,53 @@ public Mono changePassword(String newPassword, ChangePasswordOptions optio * be valid. * @param newPassword String to replace the previous password with. 
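For context, a small sketch of how these wrapped user-management calls surface to an application through the reactive API (connection details and the new password are placeholders):

import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.ReactiveCluster;

public class ChangePasswordExample {
  public static void main(String[] args) {
    Cluster cluster = Cluster.connect("couchbase://127.0.0.1", "Administrator", "password");
    ReactiveCluster reactive = cluster.reactive();

    // Changes the password of the user this connection is authenticated as;
    // with a publish-on scheduler configured, completion is signalled on that scheduler.
    reactive.users().changePassword("new-password").block();

    cluster.disconnect();
  }
}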
*/ - public Mono changePassword(String newPassword) { return Reactor.toMono(() -> async.changePassword(newPassword)); } + public Mono changePassword(String newPassword) { return reactor.publishOnUserScheduler(() -> async.changePassword(newPassword)); } public Mono upsertUser(User user) { - return Reactor.toMono(() -> async.upsertUser(user)); + return reactor.publishOnUserScheduler(() -> async.upsertUser(user)); } public Mono upsertUser(User user, UpsertUserOptions options) { - return Reactor.toMono(() -> async.upsertUser(user, options)); + return reactor.publishOnUserScheduler(() -> async.upsertUser(user, options)); } public Mono dropUser(String username) { - return Reactor.toMono(() -> async.dropUser(username)); + return reactor.publishOnUserScheduler(() -> async.dropUser(username)); } public Mono dropUser(String username, DropUserOptions options) { - return Reactor.toMono(() -> async.dropUser(username, options)); + return reactor.publishOnUserScheduler(() -> async.dropUser(username, options)); } public Mono getGroup(String groupName) { - return Reactor.toMono(() -> async.getGroup(groupName)); + return reactor.publishOnUserScheduler(() -> async.getGroup(groupName)); } public Mono getGroup(String groupName, GetGroupOptions options) { - return Reactor.toMono(() -> async.getGroup(groupName, options)); + return reactor.publishOnUserScheduler(() -> async.getGroup(groupName, options)); } public Flux getAllGroups() { - return Reactor.toFlux(() -> async.getAllGroups()); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllGroups())); } public Flux getAllGroups(GetAllGroupsOptions options) { - return Reactor.toFlux(() -> async.getAllGroups(options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllGroups(options))); } public Mono upsertGroup(Group group) { - return Reactor.toMono(() -> async.upsertGroup(group)); + return reactor.publishOnUserScheduler(() -> async.upsertGroup(group)); } public Mono upsertGroup(Group group, UpsertGroupOptions options) { - return Reactor.toMono(() -> async.upsertGroup(group, options)); + return reactor.publishOnUserScheduler(() -> async.upsertGroup(group, options)); } public Mono dropGroup(String groupName) { - return Reactor.toMono(() -> async.dropGroup(groupName)); + return reactor.publishOnUserScheduler(() -> async.dropGroup(groupName)); } public Mono dropGroup(String groupName, DropGroupOptions options) { - return Reactor.toMono(() -> async.dropGroup(groupName, options)); + return reactor.publishOnUserScheduler(() -> async.dropGroup(groupName, options)); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/user/UserManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/user/UserManager.java index 4e7558ff3..a73e79481 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/user/UserManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/user/UserManager.java @@ -16,6 +16,8 @@ package com.couchbase.client.java.manager.user; +import com.couchbase.client.core.util.ReactorOps; + import java.util.List; import static com.couchbase.client.java.AsyncUtils.block; @@ -24,8 +26,10 @@ public class UserManager { private final AsyncUserManager async; + private final ReactorOps reactor; - public UserManager(AsyncUserManager async) { + public UserManager(ReactorOps reactor, AsyncUserManager async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } @@ -34,7 +38,7 @@ public AsyncUserManager async() { } public ReactiveUserManager reactive() 
{ - return new ReactiveUserManager(async); + return new ReactiveUserManager(reactor, async); } public UserAndMetadata getUser(AuthDomain domain, String username) { diff --git a/java-client/src/main/java/com/couchbase/client/java/manager/view/ReactiveViewIndexManager.java b/java-client/src/main/java/com/couchbase/client/java/manager/view/ReactiveViewIndexManager.java index 0183e86f6..0f781afea 100644 --- a/java-client/src/main/java/com/couchbase/client/java/manager/view/ReactiveViewIndexManager.java +++ b/java-client/src/main/java/com/couchbase/client/java/manager/view/ReactiveViewIndexManager.java @@ -16,21 +16,24 @@ package com.couchbase.client.java.manager.view; -import com.couchbase.client.core.Reactor; import com.couchbase.client.core.error.CouchbaseException; import com.couchbase.client.core.error.DesignDocumentNotFoundException; import com.couchbase.client.core.error.TimeoutException; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.java.view.DesignDocumentNamespace; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; +import static com.couchbase.client.core.Reactor.toFlux; import static java.util.Objects.requireNonNull; public class ReactiveViewIndexManager { private final AsyncViewIndexManager async; + private final ReactorOps reactor; - public ReactiveViewIndexManager(AsyncViewIndexManager async) { + public ReactiveViewIndexManager(ReactorOps reactor, AsyncViewIndexManager async) { + this.reactor = requireNonNull(reactor); this.async = requireNonNull(async); } @@ -44,7 +47,7 @@ public ReactiveViewIndexManager(AsyncViewIndexManager async) { * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono getDesignDocument(String name, DesignDocumentNamespace namespace) { - return Reactor.toMono(() -> async.getDesignDocument(name, namespace)); + return reactor.publishOnUserScheduler(() -> async.getDesignDocument(name, namespace)); } /** @@ -58,7 +61,7 @@ public Mono getDesignDocument(String name, DesignDocumentNamespa * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono getDesignDocument(String name, DesignDocumentNamespace namespace, GetDesignDocumentOptions options) { - return Reactor.toMono(() -> async.getDesignDocument(name, namespace, options)); + return reactor.publishOnUserScheduler(() -> async.getDesignDocument(name, namespace, options)); } /** @@ -71,7 +74,7 @@ public Mono getDesignDocument(String name, DesignDocumentNamespa * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono upsertDesignDocument(DesignDocument designDocument, DesignDocumentNamespace namespace) { - return Reactor.toMono(() -> async.upsertDesignDocument(designDocument, namespace)); + return reactor.publishOnUserScheduler(() -> async.upsertDesignDocument(designDocument, namespace)); } /** @@ -85,7 +88,7 @@ public Mono upsertDesignDocument(DesignDocument designDocument, DesignDocu * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). 
*/ public Mono upsertDesignDocument(DesignDocument designDocument, DesignDocumentNamespace namespace, UpsertDesignDocumentOptions options) { - return Reactor.toMono(() -> async.upsertDesignDocument(designDocument, namespace, options)); + return reactor.publishOnUserScheduler(() -> async.upsertDesignDocument(designDocument, namespace, options)); } /** @@ -98,7 +101,7 @@ public Mono upsertDesignDocument(DesignDocument designDocument, DesignDocu * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono publishDesignDocument(String name) { - return Reactor.toMono(() -> async.publishDesignDocument(name)); + return reactor.publishOnUserScheduler(() -> async.publishDesignDocument(name)); } /** @@ -112,7 +115,7 @@ public Mono publishDesignDocument(String name) { * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono publishDesignDocument(String name, PublishDesignDocumentOptions options) { - return Reactor.toMono(() -> async.publishDesignDocument(name, options)); + return reactor.publishOnUserScheduler(() -> async.publishDesignDocument(name, options)); } /** @@ -125,7 +128,7 @@ public Mono publishDesignDocument(String name, PublishDesignDocumentOption * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono dropDesignDocument(String name, DesignDocumentNamespace namespace) { - return Reactor.toMono(() -> async.dropDesignDocument(name, namespace)); + return reactor.publishOnUserScheduler(() -> async.dropDesignDocument(name, namespace)); } /** @@ -139,7 +142,7 @@ public Mono dropDesignDocument(String name, DesignDocumentNamespace namesp * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Mono dropDesignDocument(String name, DesignDocumentNamespace namespace, DropDesignDocumentOptions options) { - return Reactor.toMono(() -> async.dropDesignDocument(name, namespace, options)); + return reactor.publishOnUserScheduler(() -> async.dropDesignDocument(name, namespace, options)); } /** @@ -150,7 +153,7 @@ public Mono dropDesignDocument(String name, DesignDocumentNamespace namesp * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). */ public Flux getAllDesignDocuments(DesignDocumentNamespace namespace) { - return Reactor.toFlux(() -> async.getAllDesignDocuments(namespace)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllDesignDocuments(namespace))); } /** @@ -162,6 +165,6 @@ public Flux getAllDesignDocuments(DesignDocumentNamespace namesp * @throws CouchbaseException (async) for all other error reasons (acts as a base type and catch-all). 
*/ public Flux getAllDesignDocuments(DesignDocumentNamespace namespace, GetAllDesignDocumentsOptions options) { - return Reactor.toFlux(() -> async.getAllDesignDocuments(namespace, options)); + return reactor.publishOnUserScheduler(toFlux(() -> async.getAllDesignDocuments(namespace, options))); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/query/QueryAccessor.java b/java-client/src/main/java/com/couchbase/client/java/query/QueryAccessor.java index 0d454aaef..910504c55 100644 --- a/java-client/src/main/java/com/couchbase/client/java/query/QueryAccessor.java +++ b/java-client/src/main/java/com/couchbase/client/java/query/QueryAccessor.java @@ -24,8 +24,8 @@ import com.couchbase.client.core.error.transaction.internal.CoreTransactionExpiredException; import com.couchbase.client.core.error.transaction.internal.CoreTransactionFailedException; import com.couchbase.client.core.msg.query.QueryRequest; -import com.couchbase.client.core.node.NodeIdentifier; import com.couchbase.client.core.retry.RetryStrategy; +import com.couchbase.client.core.topology.NodeIdentifier; import com.couchbase.client.java.transactions.error.TransactionCommitAmbiguousException; import com.couchbase.client.java.transactions.error.TransactionExpiredException; import com.couchbase.client.java.transactions.error.TransactionFailedException; diff --git a/java-client/src/main/java/com/couchbase/client/java/query/QueryMetrics.java b/java-client/src/main/java/com/couchbase/client/java/query/QueryMetrics.java index 2f834bb05..a508702d2 100644 --- a/java-client/src/main/java/com/couchbase/client/java/query/QueryMetrics.java +++ b/java-client/src/main/java/com/couchbase/client/java/query/QueryMetrics.java @@ -96,6 +96,15 @@ public long warningCount() { @Override public String toString() { - return internal.toString(); + return "QueryMetrics{" + + "elapsedTime=" + elapsedTime() + + ", executionTime=" + executionTime() + + ", sortCount=" + sortCount() + + ", resultCount=" + resultCount() + + ", resultSize=" + resultSize() + + ", mutationCount=" + mutationCount() + + ", errorCount=" + errorCount() + + ", warningCount=" + warningCount() + + '}'; } } diff --git a/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactionAttemptContext.java b/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactionAttemptContext.java index 118542e3a..0422bf257 100644 --- a/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactionAttemptContext.java +++ b/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactionAttemptContext.java @@ -16,13 +16,13 @@ package com.couchbase.client.java.transactions; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.api.query.CoreQueryContext; import com.couchbase.client.core.api.query.CoreQueryOptions; import com.couchbase.client.core.cnc.CbTracing; import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.TracingIdentifiers; -import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.node.ObjectNode; import com.couchbase.client.core.error.DocumentNotFoundException; import com.couchbase.client.core.transaction.CoreTransactionAttemptContext; import com.couchbase.client.core.transaction.log.CoreTransactionLogger; @@ -32,17 +32,18 @@ import com.couchbase.client.java.codec.JsonSerializer; import com.couchbase.client.java.codec.Transcoder; import 
com.couchbase.client.java.transactions.config.TransactionGetOptions; +import com.couchbase.client.java.transactions.config.TransactionGetReplicaFromPreferredServerGroupOptions; import com.couchbase.client.java.transactions.config.TransactionInsertOptions; import com.couchbase.client.java.transactions.config.TransactionReplaceOptions; import reactor.core.publisher.Mono; -import java.util.Objects; - import static com.couchbase.client.core.cnc.TracingIdentifiers.TRANSACTION_OP_INSERT; import static com.couchbase.client.core.cnc.TracingIdentifiers.TRANSACTION_OP_REMOVE; import static com.couchbase.client.core.cnc.TracingIdentifiers.TRANSACTION_OP_REPLACE; +import static com.couchbase.client.core.util.Validators.notNull; import static com.couchbase.client.java.transactions.internal.ConverterUtil.makeCollectionIdentifier; import static com.couchbase.client.java.transactions.internal.EncodingUtil.encode; +import static java.util.Objects.requireNonNull; /** * Provides methods to allow an application's transaction logic to read, mutate, insert and delete documents, as well @@ -51,10 +52,12 @@ public class ReactiveTransactionAttemptContext { private final CoreTransactionAttemptContext internal; private final JsonSerializer serializer; + private final ReactorOps reactor; - ReactiveTransactionAttemptContext(CoreTransactionAttemptContext internal, JsonSerializer serializer) { - this.internal = Objects.requireNonNull(internal); - this.serializer = Objects.requireNonNull(serializer); + ReactiveTransactionAttemptContext(ReactorOps reactor, CoreTransactionAttemptContext internal, JsonSerializer serializer) { + this.reactor = requireNonNull(reactor); + this.internal = requireNonNull(internal); + this.serializer = requireNonNull(serializer); } @Stability.Internal @@ -87,8 +90,49 @@ public Mono get(ReactiveCollection collection, String id) */ public Mono get(ReactiveCollection collection, String id, TransactionGetOptions options) { TransactionGetOptions.Built built = options.build(); - return internal.get(makeCollectionIdentifier(collection.async()), id) - .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())); + return reactor.publishOnUserScheduler( + internal.get(makeCollectionIdentifier(collection.async()), id) + .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) + ); + } + + /** + * A convenience wrapper around {@link #getReplicaFromPreferredServerGroup(ReactiveCollection, String, TransactionGetReplicaFromPreferredServerGroupOptions)} + * using default options. + */ + public Mono getReplicaFromPreferredServerGroup(ReactiveCollection collection, String id) { + return getReplicaFromPreferredServerGroup(collection, id, TransactionGetReplicaFromPreferredServerGroupOptions.DEFAULT); + } + + /** + * Gets a document from the specified Couchbase collection matching the specified id. + *

+ * It will be fetched only from document copies that are on nodes in the preferred server group, which can + * be configured with {@link com.couchbase.client.java.env.ClusterEnvironment.Builder#preferredServerGroup(String)}. + *

+ * If no replica can be retrieved, which can include for reasons such as this preferredServerGroup not being set, + * and misconfigured server groups, then {@link com.couchbase.client.core.error.DocumentUnretrievableException} + * can be raised. It is strongly recommended that this method always be used with a fallback strategy, such as: + * + * try { + * var result = ctx.getReplicaFromPreferredServerGroup(collection, id); + * } catch (DocumentUnretrievableException err) { + * var result = ctx.get(collection, id); + * } + * + * + * @param collection the Couchbase collection the document exists on + * @param id the document's ID + * @param options options controlling the operation + * @return a TransactionGetResult containing the document + */ + public Mono getReplicaFromPreferredServerGroup(ReactiveCollection collection, String id, TransactionGetReplicaFromPreferredServerGroupOptions options) { + notNull(options, "Options"); + TransactionGetReplicaFromPreferredServerGroupOptions.Built built = options.build(); + return reactor.publishOnUserScheduler( + internal.getReplicaFromPreferredServerGroup(makeCollectionIdentifier(collection.async()), id) + .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) + ); } /** @@ -118,10 +162,12 @@ public Mono insert(ReactiveCollection collection, String i span.lowCardinalityAttribute(TracingIdentifiers.ATTR_OPERATION, TRANSACTION_OP_INSERT); Transcoder.EncodedValue encoded = encode(content, span, serializer, built.transcoder(), internal.core().context()); - return internal.insert(makeCollectionIdentifier(collection.async()), id, encoded.encoded(), encoded.flags(), new SpanWrapper(span)) - .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) - .doOnError(err -> span.status(RequestSpan.StatusCode.ERROR)) - .doOnTerminate(() -> span.end()); + return reactor.publishOnUserScheduler( + internal.insert(makeCollectionIdentifier(collection.async()), id, encoded.encoded(), encoded.flags(), new SpanWrapper(span)) + .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) + .doOnError(err -> span.status(RequestSpan.StatusCode.ERROR)) + .doOnTerminate(() -> span.end()) + ); } private JsonSerializer serializer() { @@ -154,10 +200,12 @@ public Mono replace(TransactionGetResult doc, Object conte RequestSpan span = CbTracing.newSpan(internal.core().context(), TRANSACTION_OP_REPLACE, internal.span()); span.lowCardinalityAttribute(TracingIdentifiers.ATTR_OPERATION, TRANSACTION_OP_REPLACE); Transcoder.EncodedValue encoded = encode(content, span, serializer, built.transcoder(), internal.core().context()); - return internal.replace(doc.internal(), encoded.encoded(), encoded.flags(), new SpanWrapper(span)) - .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) - .doOnError(err -> span.status(RequestSpan.StatusCode.ERROR)) - .doOnTerminate(() -> span.end()); + return reactor.publishOnUserScheduler( + internal.replace(doc.internal(), encoded.encoded(), encoded.flags(), new SpanWrapper(span)) + .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) + .doOnError(err -> span.status(RequestSpan.StatusCode.ERROR)) + .doOnTerminate(() -> span.end()) + ); } /** @@ -168,9 +216,11 @@ public Mono replace(TransactionGetResult doc, Object conte public Mono remove(TransactionGetResult doc) { RequestSpan span = CbTracing.newSpan(internal.core().context(), TRANSACTION_OP_REMOVE, internal.span()); 
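The try/catch fallback in the javadoc above applies to the blocking API; for this reactive variant the equivalent is an onErrorResume. A sketch combining the preferredServerGroup environment setting referenced above with such a fallback (connection details, bucket, document id, and group name are placeholders):

import com.couchbase.client.core.error.DocumentUnretrievableException;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.ClusterOptions;
import com.couchbase.client.java.ReactiveCollection;
import com.couchbase.client.java.env.ClusterEnvironment;

public class PreferredGroupTransactionExample {
  public static void main(String[] args) {
    ClusterEnvironment env = ClusterEnvironment.builder()
        .preferredServerGroup("group_1") // must match a server group configured on the cluster
        .build();

    Cluster cluster = Cluster.connect(
        "couchbase://127.0.0.1",
        ClusterOptions.clusterOptions("Administrator", "password").environment(env));

    ReactiveCollection collection = cluster.bucket("travel-sample").defaultCollection().reactive();

    cluster.reactive().transactions().run(ctx ->
        // Prefer replicas in the preferred server group; fall back to a regular get if none is reachable.
        ctx.getReplicaFromPreferredServerGroup(collection, "doc-id")
            .onErrorResume(DocumentUnretrievableException.class, err -> ctx.get(collection, "doc-id"))
            .then()
    ).block();

    cluster.disconnect();
    env.shutdown();
  }
}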
span.lowCardinalityAttribute(TracingIdentifiers.ATTR_OPERATION, TRANSACTION_OP_REMOVE); - return internal.remove(doc.internal(), new SpanWrapper(span)) + return reactor.publishOnUserScheduler( + internal.remove(doc.internal(), new SpanWrapper(span)) .doOnError(err -> span.status(RequestSpan.StatusCode.ERROR)) - .doOnTerminate(() -> span.end()); + .doOnTerminate(() -> span.end()) + ); } @SuppressWarnings("unused") @@ -234,10 +284,12 @@ public Mono query(final ReactiveScope scope, final String statement, final TransactionQueryOptions options) { CoreQueryOptions opts = options != null ? options.builder().build() : null; - return internal.queryBlocking(statement, - scope == null ? null : CoreQueryContext.of(scope.bucketName(), scope.name()), - opts, - false) - .map(response -> new TransactionQueryResult(response, serializer())); + return reactor.publishOnUserScheduler( + internal.queryBlocking(statement, + scope == null ? null : CoreQueryContext.of(scope.bucketName(), scope.name()), + opts, + false) + .map(response -> new TransactionQueryResult(response, serializer())) + ); } } diff --git a/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactions.java b/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactions.java index 788816a45..b1eefff44 100644 --- a/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactions.java +++ b/java-client/src/main/java/com/couchbase/client/java/transactions/ReactiveTransactions.java @@ -17,6 +17,7 @@ package com.couchbase.client.java.transactions; import com.couchbase.client.core.Core; +import com.couchbase.client.core.util.ReactorOps; import com.couchbase.client.core.annotation.Stability; import com.couchbase.client.core.transaction.CoreTransactionAttemptContext; import com.couchbase.client.core.transaction.CoreTransactionContext; @@ -25,7 +26,6 @@ import com.couchbase.client.core.transaction.config.CoreTransactionOptions; import com.couchbase.client.core.transaction.threadlocal.TransactionMarker; import com.couchbase.client.core.transaction.threadlocal.TransactionMarkerOwner; -import com.couchbase.client.core.transaction.util.CoreTransactionsSchedulers; import com.couchbase.client.java.codec.JsonSerializer; import com.couchbase.client.java.transactions.config.TransactionOptions; import com.couchbase.client.java.transactions.error.TransactionFailedException; @@ -48,6 +48,7 @@ public class ReactiveTransactions { private final CoreTransactionsReactive internal; private final JsonSerializer serializer; + private final ReactorOps reactor; @Stability.Internal public ReactiveTransactions(Core core, JsonSerializer serializer) { @@ -55,6 +56,7 @@ public ReactiveTransactions(Core core, JsonSerializer serializer) { this.internal = new CoreTransactionsReactive(core, core.context().environment().transactionsConfig()); this.serializer = Objects.requireNonNull(serializer); + this.reactor = core.environment(); } /** @@ -81,9 +83,11 @@ public ReactiveTransactions(Core core, JsonSerializer serializer) { */ public Mono run(Function> transactionLogic, @Nullable TransactionOptions options) { - return internal.run((ctx) -> transactionLogic.apply(new ReactiveTransactionAttemptContext(ctx, serializer)), options == null ? null : options.build()) + return reactor.publishOnUserScheduler( + internal.run((ctx) -> transactionLogic.apply(new ReactiveTransactionAttemptContext(reactor, ctx, serializer)), options == null ? 
null : options.build()) .onErrorResume(ErrorUtil::convertTransactionFailedInternal) - .map(TransactionResult::new); + .map(TransactionResult::new) + ); } /** diff --git a/java-client/src/main/java/com/couchbase/client/java/transactions/TransactionAttemptContext.java b/java-client/src/main/java/com/couchbase/client/java/transactions/TransactionAttemptContext.java index f8b64a397..999fc6c77 100644 --- a/java-client/src/main/java/com/couchbase/client/java/transactions/TransactionAttemptContext.java +++ b/java-client/src/main/java/com/couchbase/client/java/transactions/TransactionAttemptContext.java @@ -22,11 +22,7 @@ import com.couchbase.client.core.cnc.CbTracing; import com.couchbase.client.core.cnc.RequestSpan; import com.couchbase.client.core.cnc.TracingIdentifiers; -import com.couchbase.client.core.deps.com.fasterxml.jackson.databind.node.ObjectNode; import com.couchbase.client.core.error.CouchbaseException; -import com.couchbase.client.core.error.EncodingFailureException; -import com.couchbase.client.core.json.Mapper; -import com.couchbase.client.core.msg.query.QueryRequest; import com.couchbase.client.core.transaction.CoreTransactionAttemptContext; import com.couchbase.client.core.transaction.log.CoreTransactionLogger; import com.couchbase.client.core.transaction.support.SpanWrapper; @@ -36,11 +32,10 @@ import com.couchbase.client.java.codec.Transcoder; import com.couchbase.client.java.json.JsonObject; import com.couchbase.client.java.transactions.config.TransactionGetOptions; +import com.couchbase.client.java.transactions.config.TransactionGetReplicaFromPreferredServerGroupOptions; import com.couchbase.client.java.transactions.config.TransactionInsertOptions; import com.couchbase.client.java.transactions.config.TransactionReplaceOptions; -import reactor.util.annotation.Nullable; -import java.io.IOException; import java.util.Objects; import static com.couchbase.client.core.cnc.TracingIdentifiers.TRANSACTION_OP_INSERT; @@ -108,6 +103,44 @@ public TransactionGetResult get(Collection collection, String id, TransactionGet .block(); } + /** + * A convenience wrapper around {@link #getReplicaFromPreferredServerGroup(Collection, String, TransactionGetReplicaFromPreferredServerGroupOptions)} + * using default options. + */ + public TransactionGetResult getReplicaFromPreferredServerGroup(Collection collection, String id) { + return getReplicaFromPreferredServerGroup(collection, id, TransactionGetReplicaFromPreferredServerGroupOptions.DEFAULT); + } + + /** + * Gets a document from the specified Couchbase collection matching the specified id. + *

+ * It will be fetched only from document copies that are on nodes in the preferred server group, which can + * be configured with {@link com.couchbase.client.java.env.ClusterEnvironment.Builder#preferredServerGroup(String)}. + *

+ * If no replica can be retrieved, which can include for reasons such as this preferredServerGroup not being set, + * and misconfigured server groups, then {@link com.couchbase.client.core.error.DocumentUnretrievableException} + * can be raised. It is strongly recommended that this method always be used with a fallback strategy, such as: + * + * try { + * var result = ctx.getReplicaFromPreferredServerGroup(collection, id); + * } catch (DocumentUnretrievableException err) { + * var result = ctx.get(collection, id); + * } + * + * + * @param collection the Couchbase collection the document exists on + * @param id the document's ID + * @param options options controlling the operation + * @return a TransactionGetResult containing the document + */ + public TransactionGetResult getReplicaFromPreferredServerGroup(Collection collection, String id, TransactionGetReplicaFromPreferredServerGroupOptions options) { + notNull(options, "Options"); + TransactionGetReplicaFromPreferredServerGroupOptions.Built built = options.build(); + return internal.getReplicaFromPreferredServerGroup(makeCollectionIdentifier(collection.async()), id) + .map(result -> new TransactionGetResult(result, serializer(), built.transcoder())) + .block(); + } + /** * Mutates the specified doc with new content. *

diff --git a/java-client/src/main/java/com/couchbase/client/java/transactions/config/TransactionGetReplicaFromPreferredServerGroupOptions.java b/java-client/src/main/java/com/couchbase/client/java/transactions/config/TransactionGetReplicaFromPreferredServerGroupOptions.java new file mode 100644 index 000000000..67a1ef696 --- /dev/null +++ b/java-client/src/main/java/com/couchbase/client/java/transactions/config/TransactionGetReplicaFromPreferredServerGroupOptions.java @@ -0,0 +1,71 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.couchbase.client.java.transactions.config; + +import com.couchbase.client.core.annotation.SinceCouchbase; +import com.couchbase.client.core.annotation.Stability; +import com.couchbase.client.java.codec.Transcoder; +import reactor.util.annotation.Nullable; + +/** + * Operations controlling a transactional getReplicaFromPreferredServerGroup. + */ +@Stability.Volatile +public class TransactionGetReplicaFromPreferredServerGroupOptions { + public static final TransactionGetReplicaFromPreferredServerGroupOptions DEFAULT = new TransactionGetReplicaFromPreferredServerGroupOptions(); + + private @Nullable Transcoder transcoder; + + private TransactionGetReplicaFromPreferredServerGroupOptions() { + } + + public static TransactionGetReplicaFromPreferredServerGroupOptions transactionGetReplicaFromPreferredServerGroupOptions() { + return new TransactionGetReplicaFromPreferredServerGroupOptions(); + } + + /** + * Specify a custom {@link Transcoder} that is used to decode the content of the result. + *

+ * If not specified, the {@link com.couchbase.client.java.env.ClusterEnvironment}'s {@link com.couchbase.client.java.codec.JsonSerializer} + * (NOT its transcoder) is used. + *
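A short usage sketch of this option for reading a binary document inside a blocking transaction; RawBinaryTranscoder is the SDK's stock binary transcoder, and the ctx, collection, and id here are placeholders supplied by the surrounding transaction lambda:

import com.couchbase.client.java.Collection;
import com.couchbase.client.java.codec.RawBinaryTranscoder;
import com.couchbase.client.java.transactions.TransactionAttemptContext;
import com.couchbase.client.java.transactions.TransactionGetResult;

import static com.couchbase.client.java.transactions.config.TransactionGetReplicaFromPreferredServerGroupOptions.transactionGetReplicaFromPreferredServerGroupOptions;

public class BinaryReplicaReadExample {
  // Intended to be called from inside cluster.transactions().run(ctx -> { ... })
  static byte[] readBinary(TransactionAttemptContext ctx, Collection collection, String id) {
    TransactionGetResult doc = ctx.getReplicaFromPreferredServerGroup(
        collection, id,
        transactionGetReplicaFromPreferredServerGroupOptions().transcoder(RawBinaryTranscoder.INSTANCE));
    // Decoded by the supplied transcoder rather than the environment's JsonSerializer.
    return doc.contentAs(byte[].class);
  }
}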

+ * It is marked as being available from 7.6.2 because prior to this, only JSON documents were supported in transactions. This release added + * support for binary documents. + * + * @param transcoder the custom transcoder that should be used for decoding. + * @return this to allow method chaining. + */ + @SinceCouchbase("7.6.2") + public TransactionGetReplicaFromPreferredServerGroupOptions transcoder(Transcoder transcoder) { + this.transcoder = transcoder; + return this; + } + + @Stability.Internal + public TransactionGetReplicaFromPreferredServerGroupOptions.Built build() { + return new TransactionGetReplicaFromPreferredServerGroupOptions.Built(); + } + + @Stability.Internal + public class Built { + Built() { + } + + public Transcoder transcoder() { + return transcoder; + } + } +} diff --git a/java-client/src/main/java/com/couchbase/client/java/transactions/internal/TransactionsSupportedExtensionsUtil.java b/java-client/src/main/java/com/couchbase/client/java/transactions/internal/TransactionsSupportedExtensionsUtil.java index 5a114bda9..ea14b97c2 100644 --- a/java-client/src/main/java/com/couchbase/client/java/transactions/internal/TransactionsSupportedExtensionsUtil.java +++ b/java-client/src/main/java/com/couchbase/client/java/transactions/internal/TransactionsSupportedExtensionsUtil.java @@ -56,7 +56,10 @@ public class TransactionsSupportedExtensionsUtil { /* @since 3.7.0 */ CoreTransactionsExtension.EXT_BINARY_SUPPORT, + /* @since 3.7.3 */ + CoreTransactionsExtension.EXT_PARALLEL_UNSTAGING, + /* @since 3.7.4 */ - CoreTransactionsExtension.EXT_PARALLEL_UNSTAGING + CoreTransactionsExtension.EXT_REPLICA_FROM_PREFERRED_GROUP ); } diff --git a/java-examples/pom.xml b/java-examples/pom.xml index a61446ec7..f79655f91 100644 --- a/java-examples/pom.xml +++ b/java-examples/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 java-examples - 1.7.3 + 1.7.6 Couchbase Java SDK Examples diff --git a/java-fit-performer/pom.xml b/java-fit-performer/pom.xml index 38f418894..e36d8e603 100644 --- a/java-fit-performer/pom.xml +++ b/java-fit-performer/pom.xml @@ -7,7 +7,7 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 fit-performer-java-sdk diff --git a/java-fit-performer/src/main/java/com/couchbase/JavaPerformer.java b/java-fit-performer/src/main/java/com/couchbase/JavaPerformer.java index da76fd010..79ef92e94 100644 --- a/java-fit-performer/src/main/java/com/couchbase/JavaPerformer.java +++ b/java-fit-performer/src/main/java/com/couchbase/JavaPerformer.java @@ -81,6 +81,7 @@ import com.couchbase.utils.Capabilities; import com.couchbase.utils.ClusterConnection; import com.couchbase.utils.OptionsUtil; +import com.couchbase.utils.UserSchedulerUtil; import io.grpc.Server; import io.grpc.ServerBuilder; import io.grpc.Status; @@ -232,6 +233,12 @@ public void clusterConnectionCreate(ClusterConnectionCreateRequest request, var clusterEnvironment = OptionsUtil.convertClusterConfig(request, getCluster, onClusterConnectionClose); + // [if:3.7.5] first version that allows specifying custom publishOn scheduler + var userExecutorAndScheduler = UserSchedulerUtil.userExecutorAndScheduler(); + onClusterConnectionClose.add(userExecutorAndScheduler::dispose); + clusterEnvironment.publishOnScheduler(userExecutorAndScheduler::scheduler); + // [end] + var connection = new ClusterConnection(request.getClusterHostname(), request.getClusterUsername(), request.getClusterPassword(), diff --git a/java-fit-performer/src/main/java/com/couchbase/JavaSdkCommandExecutor.java 
b/java-fit-performer/src/main/java/com/couchbase/JavaSdkCommandExecutor.java index 5ba4b1558..0a7959e25 100644 --- a/java-fit-performer/src/main/java/com/couchbase/JavaSdkCommandExecutor.java +++ b/java-fit-performer/src/main/java/com/couchbase/JavaSdkCommandExecutor.java @@ -1020,20 +1020,13 @@ public static void populateResult(com.couchbase.client.protocol.sdk.query.Comman var builder = com.couchbase.client.protocol.sdk.query.QueryResult.newBuilder(); - var content = ContentAsUtil.contentTypeList(request.getContentAs(), - () -> values.rowsAs(byte[].class), - () -> values.rowsAs(String.class), - () -> values.rowsAs(JsonObject.class), - () -> values.rowsAs(JsonArray.class), - () -> values.rowsAs(Boolean.class), - () -> values.rowsAs(Integer.class), - () -> values.rowsAs(Double.class)); - - if (content.isFailure()) { - throw content.exception(); - } + var contentAs = request.getContentAs(); + var rowType = ContentAsUtil.toJavaClass(contentAs); + var content = values.rowsAs(rowType).stream() + .map(row -> ContentAsUtil.toFitContent(row, contentAs)) + .toList(); - builder.addAllContent(content.value()); + builder.addAllContent(content); // Metadata var convertedMetaData = convertMetaData(values.metaData()); @@ -1349,7 +1342,11 @@ public static com.couchbase.client.java.kv.MutateInMacro convertMacro (MutateInM var out = GetAllReplicasOptions.getAllReplicasOptions(); if (opts.hasTimeoutMsecs()) out.timeout(Duration.ofMillis(opts.getTimeoutMsecs())); if (opts.hasTranscoder()) out.transcoder(convertTranscoder(opts.getTranscoder())); - + // [if:3.7.4] + if (opts.hasReadPreference()) { + out.readPreference(convertReadPreference(opts.getReadPreference())); + } + // [end] return out; } else return null; @@ -1361,7 +1358,11 @@ public static com.couchbase.client.java.kv.MutateInMacro convertMacro (MutateInM var out = GetAnyReplicaOptions.getAnyReplicaOptions(); if (opts.hasTimeoutMsecs()) out.timeout(Duration.ofMillis(opts.getTimeoutMsecs())); if (opts.hasTranscoder()) out.transcoder(convertTranscoder(opts.getTranscoder())); - + // [if:3.7.4] + if (opts.hasReadPreference()) { + out.readPreference(convertReadPreference(opts.getReadPreference())); + } + // [end] return out; } else return null; @@ -1544,4 +1545,17 @@ public static WaitUntilReadyOptions waitUntilReadyOptions(WaitUntilReadyRequest return options; } + + // [if:3.7.4] + public static com.couchbase.client.java.kv.ReadPreference convertReadPreference(com.couchbase.client.protocol.shared.ReadPreference readPreference) { + return switch (readPreference) { + case NO_PREFERENCE -> com.couchbase.client.java.kv.ReadPreference.NO_PREFERENCE; + case SELECTED_SERVER_GROUP -> com.couchbase.client.java.kv.ReadPreference.PREFERRED_SERVER_GROUP; + case SELECTED_SERVER_GROUP_OR_ALL_AVAILABLE -> throw new UnsupportedOperationException(); + default -> throw new UnsupportedOperationException("Read preference not handled " + readPreference); + }; + } + // [else] + //? 
public static Object convertReadPreference(com.couchbase.client.protocol.shared.ReadPreference readPreference) { return null; } + // [end] } diff --git a/java-fit-performer/src/main/java/com/couchbase/LookupInHelper.java b/java-fit-performer/src/main/java/com/couchbase/LookupInHelper.java index 6e4cd5373..d6c108f89 100644 --- a/java-fit-performer/src/main/java/com/couchbase/LookupInHelper.java +++ b/java-fit-performer/src/main/java/com/couchbase/LookupInHelper.java @@ -46,8 +46,10 @@ import java.util.stream.Stream; import static com.couchbase.JavaSdkCommandExecutor.convertExceptionShared; +import static com.couchbase.JavaSdkCommandExecutor.convertReadPreference; import static com.couchbase.JavaSdkCommandExecutor.setSuccess; import static com.couchbase.client.performer.core.util.TimeUtil.getTimeNow; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; public class LookupInHelper { public static Result.Builder handleLookupIn(PerRun perRun, @@ -247,7 +249,7 @@ private static Mono handleLookupInReactive( result = collection.lookupIn(docId, specs); } - return result.doOnNext(r -> { + return withSchedulerCheck(result).doOnNext(r -> { out.setElapsedNanos(System.nanoTime() - start); if (command.getReturnResult()) { @@ -284,7 +286,7 @@ private static Mono handleLookupInAnyReplicaReactive( result = collection.lookupInAnyReplica(docId, specs); } - return result + return withSchedulerCheck(result) .doOnError(err -> err.printStackTrace()) .doFinally(v -> System.out.println("Finished")) .doOnNext(v -> { @@ -331,7 +333,7 @@ private static Mono handleLookupInAllReplicasReactive( out.setElapsedNanos(System.nanoTime() - start); var streamer = new FluxStreamer( - results, + withSchedulerCheck(results), perRun, req.getStreamConfig().getStreamId(), req.getStreamConfig(), @@ -417,6 +419,11 @@ private static List mapSpecs(List mapSpecs(List performOperationReactive(com.couchbase.client.protocol.sdk. Mono mr; if (options == null) mr = collection.insert(docId, content); else mr = collection.insert(docId, content, options); - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) populateResult(result, r); else setSuccess(result); @@ -124,7 +122,7 @@ private Mono performOperationReactive(com.couchbase.client.protocol.sdk. Mono gr; if (options == null) gr = collection.get(docId); else gr = collection.get(docId, options); - return gr.map(r -> { + return withSchedulerCheck(gr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) populateResult(request.getContentAs(), result, r); else setSuccess(result); @@ -141,7 +139,7 @@ private Mono performOperationReactive(com.couchbase.client.protocol.sdk. if (options == null) mr = collection.remove(docId); else mr = collection.remove(docId, options); result.setElapsedNanos(System.nanoTime() - start); - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { if (op.getReturnResult()) populateResult(result, r); else setSuccess(result); return result.build(); @@ -157,7 +155,7 @@ private Mono performOperationReactive(com.couchbase.client.protocol.sdk. 
Mono mr; if (options == null) mr = collection.replace(docId, content); else mr = collection.replace(docId, content, options); - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) populateResult(result, r); else setSuccess(result); @@ -175,7 +173,7 @@ private Mono performOperationReactive(com.couchbase.client.protocol.sdk. if (options == null) mr = collection.upsert(docId, content); else mr = collection.upsert(docId, content, options); result.setElapsedNanos(System.nanoTime() - start); - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { if (op.getReturnResult()) populateResult(result, r); else setSuccess(result); return result.build(); @@ -192,7 +190,7 @@ private Mono performOperationReactive(com.couchbase.client.protocol.sdk. if (options != null) results = collection.scan(scanType, options); else results = collection.scan(scanType); result.setElapsedNanos(System.nanoTime() - start); - var streamer = new FluxStreamer(results, perRun, request.getStreamConfig().getStreamId(), request.getStreamConfig(), + var streamer = new FluxStreamer(withSchedulerCheck(results), perRun, request.getStreamConfig().getStreamId(), request.getStreamConfig(), (ScanResult r) -> processScanResult(request, r), (Throwable err) -> convertException(err)); perRun.streamerOwner().addAndStart(streamer); @@ -247,7 +245,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res } else { gr = collection.getAndLock(docId, Duration.ofSeconds(duration.getSeconds()), options); } - return gr.map(r -> { + return withSchedulerCheck(gr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) { populateResult(request.getContentAs(), result, r); @@ -271,7 +269,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res } else { gr = collection.unlock(docId, cas, options); } - return gr.then(Mono.fromCallable(() -> { + return withSchedulerCheck(gr).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); @@ -296,7 +294,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res } else { gr = collection.getAndTouch(docId, expiry, options); } - return gr.map(r -> { + return withSchedulerCheck(gr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) { populateResult(request.getContentAs(), result, r); @@ -325,7 +323,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res } else { mr = collection.touch(docId, expiry, options); } - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) { populateResult(result, r); @@ -340,7 +338,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res var request = clc.getExists(); var docId = getDocId(request.getLocation()); var exists = collection.exists(docId); - return exists.map(r -> { + return withSchedulerCheck(exists).map(r -> { if (op.getReturnResult()) { populateResult(result, r); } else { @@ -360,7 +358,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res } else { mr = collection.mutateIn(docId, request.getSpecList().stream().map(v -> convertMutateInSpec(v)).toList(), options); } - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { if (op.getReturnResult()) { populateResult(result, r, request); } else { @@ -383,7 +381,7 @@ private Mono 
handleCollectionLevelCommand(Command op, PerRun perRun, Res results = collection.getAllReplicas(docId, options); } result.setElapsedNanos(System.nanoTime() - start); - var streamer = new FluxStreamer<>(results, perRun, request.getStreamConfig().getStreamId(), request.getStreamConfig(), + var streamer = new FluxStreamer<>(withSchedulerCheck(results), perRun, request.getStreamConfig().getStreamId(), request.getStreamConfig(), (GetReplicaResult r) -> processGetAllReplicasResult(request, r), this::convertException); perRun.streamerOwner().addAndStart(streamer); @@ -406,7 +404,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res } else { gr = collection.getAnyReplica(docId, options); } - return gr.map(r -> { + return withSchedulerCheck(gr).map(r -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) { populateResult(result, r, request.getContentAs()); @@ -434,7 +432,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res cr = collection.binary().increment(docId, options); } result.setElapsedNanos(System.nanoTime() - start); - return cr.map(r -> { + return withSchedulerCheck(cr).map(r -> { if (op.getReturnResult()) { populateResult(result, r); } else { @@ -457,7 +455,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res cr = collection.binary().decrement(docId, options); } result.setElapsedNanos(System.nanoTime() - start); - return cr.map(r -> { + return withSchedulerCheck(cr).map(r -> { if (op.getReturnResult()) { populateResult(result, r); } else { @@ -480,7 +478,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res mr = collection.binary().append(docId, request.getContent().toByteArray(), options); } result.setElapsedNanos(System.nanoTime() - start); - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { if (op.getReturnResult()) { populateResult(result, r); } else { @@ -503,7 +501,7 @@ private Mono handleCollectionLevelCommand(Command op, PerRun perRun, Res mr = collection.binary().prepend(docId, request.getContent().toByteArray(), options); } result.setElapsedNanos(System.nanoTime() - start); - return mr.map(r -> { + return withSchedulerCheck(mr).map(r -> { if (op.getReturnResult()) { populateResult(result, r); } else { @@ -540,7 +538,7 @@ private Mono handleScopeLevelCommand(Command op, PerRun perRun, Result.B queryResult = scope.reactive().query(query); } - return returnQueryResult(request, queryResult, result, start); + return returnQueryResult(request, withSchedulerCheck(queryResult), result, start); } // [end] @@ -584,7 +582,7 @@ private Mono handleBucketLevelCommand(Command op, PerRun perRun, Result. 
response = bucket.waitUntilReady(timeout); } - return response.then(Mono.fromCallable(() -> { + return withSchedulerCheck(response).then(Mono.fromCallable(() -> { setSuccess(result); return result.build(); })); @@ -617,7 +615,7 @@ private Mono handleClusterLevelCommand(Command op, PerRun perRun, Result response = cluster.waitUntilReady(timeout); } - return response.then(Mono.fromCallable(() -> { + return withSchedulerCheck(response).then(Mono.fromCallable(() -> { setSuccess(result); return result.build(); })); @@ -676,7 +674,7 @@ else if (clc.hasEventingFunctionManager()) { queryResult = connection.cluster().reactive().query(query); } - return returnQueryResult(request, queryResult, result, start); + return returnQueryResult(request, withSchedulerCheck(queryResult), result, start); } return Mono.error(new UnsupportedOperationException("Unknown command " + op)); @@ -687,37 +685,25 @@ protected Exception convertException(Throwable raw) { return convertExceptionShared(raw); } - private Mono returnQueryResult(com.couchbase.client.protocol.sdk.query.Command request, Mono queryResult, Result.Builder result, Long start) { - return queryResult.publishOn(Schedulers.boundedElastic()).map(r -> { - result.setElapsedNanos(System.nanoTime() - start); - - var builder = com.couchbase.client.protocol.sdk.query.QueryResult.newBuilder(); - - // FIT only supports testing blocking (not streaming) queries currently, so the .block() here to gather - // the rows is fine. - var content = ContentAsUtil.contentTypeList(request.getContentAs(), - () -> r.rowsAs(byte[].class).collectList().block(), - () -> r.rowsAs(String.class).collectList().block(), - () -> r.rowsAs(JsonObject.class).collectList().block(), - () -> r.rowsAs(JsonArray.class).collectList().block(), - () -> r.rowsAs(Boolean.class).collectList().block(), - () -> r.rowsAs(Integer.class).collectList().block(), - () -> r.rowsAs(Double.class).collectList().block()); - - if (content.isFailure()) { - throw content.exception(); - } - - builder.addAllContent(content.value()); - - // Metadata - var convertedMetaData = convertMetaData(r.metaData().block()); - builder.setMetaData(convertedMetaData); - - result.setSdk(com.couchbase.client.protocol.sdk.Result.newBuilder() - .setQueryResult(builder)); + private Mono returnQueryResult(com.couchbase.client.protocol.sdk.query.Command request, Mono queryResult, Result.Builder result, Long start) { + return queryResult.flatMap(r -> { + result.setElapsedNanos(System.nanoTime() - start); - return result.build(); - }); - } + var contentAs = request.getContentAs(); + var rowType = ContentAsUtil.toJavaClass(contentAs); + return r.rowsAs(rowType) + .map(it -> ContentAsUtil.toFitContent(it, contentAs)) + .collectList() + .zipWith(r.metaData().map(JavaSdkCommandExecutor::convertMetaData)) + .map(fitRowsAndMetadata -> { + var fitQueryResult = com.couchbase.client.protocol.sdk.query.QueryResult.newBuilder() + .addAllContent(fitRowsAndMetadata.getT1()) + .setMetaData(fitRowsAndMetadata.getT2()); + + result.setSdk(com.couchbase.client.protocol.sdk.Result.newBuilder() + .setQueryResult(fitQueryResult)); + return result.build(); + }); + }); + } } diff --git a/java-fit-performer/src/main/java/com/couchbase/eventing/EventingHelper.java b/java-fit-performer/src/main/java/com/couchbase/eventing/EventingHelper.java index 531570996..43682977b 100644 --- a/java-fit-performer/src/main/java/com/couchbase/eventing/EventingHelper.java +++ b/java-fit-performer/src/main/java/com/couchbase/eventing/EventingHelper.java @@ -30,6 +30,7 @@ import 
java.util.concurrent.ConcurrentHashMap; import static com.couchbase.utils.OptionsUtil.convertDuration; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; public class EventingHelper { @@ -82,7 +83,7 @@ public static Mono handleEventingFunctionManagerReactive(ReactiveCluster } - return response.map(r -> { + return withSchedulerCheck(response).map(r -> { minimalEventingFunctionFromResult(result, r); return result.build(); }); diff --git a/java-fit-performer/src/main/java/com/couchbase/manager/BucketManagerHelper.java b/java-fit-performer/src/main/java/com/couchbase/manager/BucketManagerHelper.java index 4a8b04da7..0ec87c217 100644 --- a/java-fit-performer/src/main/java/com/couchbase/manager/BucketManagerHelper.java +++ b/java-fit-performer/src/main/java/com/couchbase/manager/BucketManagerHelper.java @@ -43,6 +43,7 @@ import java.util.concurrent.ConcurrentHashMap; import static com.couchbase.JavaSdkCommandExecutor.setSuccess; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; public class BucketManagerHelper { @@ -157,7 +158,7 @@ public static Mono handleBucketManagerReactive(ReactiveCluster cluster, var options = createBucketOptions(request.getOptions(), spans); response = cluster.buckets().createBucket(createBucketSettings(request.getSettings(), spans), options); } - return response.then(Mono.fromCallable(() -> { + return withSchedulerCheck(response).then(Mono.fromCallable(() -> { populateResult(start, result, null); result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); @@ -173,7 +174,7 @@ public static Mono handleBucketManagerReactive(ReactiveCluster cluster, var options = updateBucketOptions(request.getOptions(), spans); response = cluster.buckets().updateBucket(updateBucketSettings(request.getSettings(), spans), options); } - return response.then(Mono.fromCallable(() -> { + return withSchedulerCheck(response).then(Mono.fromCallable(() -> { populateResult(start, result, null); result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); @@ -189,7 +190,7 @@ public static Mono handleBucketManagerReactive(ReactiveCluster cluster, var options = dropBucketOptions(request.getOptions(), spans); response = cluster.buckets().dropBucket(request.getBucketName(), options); } - return response.then(Mono.fromCallable(() -> { + return withSchedulerCheck(response).then(Mono.fromCallable(() -> { populateResult(start, result, null); result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); @@ -205,7 +206,7 @@ public static Mono handleBucketManagerReactive(ReactiveCluster cluster, var options = flushBucketOptions(request.getOptions(), spans); response = cluster.buckets().flushBucket(request.getBucketName(), options); } - return response.then(Mono.fromCallable(() -> { + return withSchedulerCheck(response).then(Mono.fromCallable(() -> { populateResult(start, result, null); result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); @@ -220,7 +221,7 @@ public static Mono handleBucketManagerReactive(ReactiveCluster cluster, var options = createGetAllBucketsOptions(request.getOptions(), spans); response = cluster.buckets().getAllBuckets(options); } - return response.map(rr -> { + return withSchedulerCheck(response).map(rr -> { rr.forEach((name, settings) -> populateResult(start, result, settings)); return result.build(); diff --git a/java-fit-performer/src/main/java/com/couchbase/manager/CollectionManagerHelper.java b/java-fit-performer/src/main/java/com/couchbase/manager/CollectionManagerHelper.java index 
1a5efec5d..f9bfb7608 100644 --- a/java-fit-performer/src/main/java/com/couchbase/manager/CollectionManagerHelper.java +++ b/java-fit-performer/src/main/java/com/couchbase/manager/CollectionManagerHelper.java @@ -41,6 +41,7 @@ import java.util.stream.Collectors; import static com.couchbase.JavaSdkCommandExecutor.setSuccess; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; public class CollectionManagerHelper { @@ -137,7 +138,7 @@ public static Mono handleCollectionManagerReactive(ReactiveCluster clust var options = createGetAllScopesOptions(request.getOptions(), spans); response = cluster.bucket(bucketName).collections().getAllScopes(options); } - Flux f = response.map(r -> { + Flux f = withSchedulerCheck(response).map(r -> { populateResult(result, r); return result.build(); }); @@ -151,7 +152,7 @@ public static Mono handleCollectionManagerReactive(ReactiveCluster clust } else { r = collections.createScope(request.getName()); } - return r.then(Mono.fromSupplier(() -> { + return withSchedulerCheck(r).then(Mono.fromSupplier(() -> { setSuccess(result); return result.build(); })); @@ -164,7 +165,7 @@ public static Mono handleCollectionManagerReactive(ReactiveCluster clust } else { r = collections.dropScope(request.getName()); } - return r.then(Mono.fromSupplier(() -> { + return withSchedulerCheck(r).then(Mono.fromSupplier(() -> { setSuccess(result); return result.build(); })); @@ -178,7 +179,7 @@ public static Mono handleCollectionManagerReactive(ReactiveCluster clust } else { r = collections.createCollection(request.getScopeName(), request.getName(), settings); } - return r.then(Mono.fromSupplier(() -> { + return withSchedulerCheck(r).then(Mono.fromSupplier(() -> { setSuccess(result); return result.build(); })); @@ -192,7 +193,7 @@ public static Mono handleCollectionManagerReactive(ReactiveCluster clust } else { r = collections.updateCollection(request.getScopeName(), request.getName(), settings); } - return r.then(Mono.fromSupplier(() -> { + return withSchedulerCheck(r).then(Mono.fromSupplier(() -> { setSuccess(result); return result.build(); })); @@ -205,7 +206,7 @@ public static Mono handleCollectionManagerReactive(ReactiveCluster clust } else { r = collections.dropCollection(request.getScopeName(), request.getName()); } - return r.then(Mono.fromSupplier(() -> { + return withSchedulerCheck(r).then(Mono.fromSupplier(() -> { setSuccess(result); return result.build(); })); diff --git a/java-fit-performer/src/main/java/com/couchbase/query/QueryIndexManagerHelper.java b/java-fit-performer/src/main/java/com/couchbase/query/QueryIndexManagerHelper.java index 18b0242c7..06e43e90c 100644 --- a/java-fit-performer/src/main/java/com/couchbase/query/QueryIndexManagerHelper.java +++ b/java-fit-performer/src/main/java/com/couchbase/query/QueryIndexManagerHelper.java @@ -40,12 +40,12 @@ import java.time.Duration; import java.util.HashSet; import java.util.List; -import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import static com.couchbase.JavaSdkCommandExecutor.setSuccess; import static com.couchbase.client.performer.core.util.TimeUtil.getTimeNow; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; public class QueryIndexManagerHelper { private QueryIndexManagerHelper() { @@ -301,7 +301,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac res = collection.queryIndexes().createPrimaryIndex(options); } } - return res.then(Mono.fromCallable(() -> { + return 
withSchedulerCheck(res).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); @@ -326,7 +326,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac res = collection.queryIndexes().createIndex(request.getIndexName(), fields, options); } } - return res.then(Mono.fromCallable(() -> { + return withSchedulerCheck(res).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); @@ -350,7 +350,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac indexes = collection.queryIndexes().getAllIndexes(options); } } - return indexes.collectList().map(i -> { + return withSchedulerCheck(indexes).collectList().map(i -> { result.setElapsedNanos(System.nanoTime() - start); if (op.getReturnResult()) { populateResult(result, i); @@ -378,7 +378,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac res = collection.queryIndexes().dropPrimaryIndex(options); } } - return res.then(Mono.fromCallable(() -> { + return withSchedulerCheck(res).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); @@ -402,7 +402,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac res = collection.queryIndexes().dropIndex(request.getIndexName(), options); } } - return res.then(Mono.fromCallable(() -> { + return withSchedulerCheck(res).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); @@ -426,7 +426,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac res = collection.queryIndexes().watchIndexes(request.getIndexNamesList().stream().toList(), Duration.ofMillis(request.getTimeoutMsecs()), options); } } - return res.then(Mono.fromCallable(() -> { + return withSchedulerCheck(res).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); @@ -450,7 +450,7 @@ private static Mono handleQueryIndexManagerSharedReactive(@Nullable Reac res = collection.queryIndexes().buildDeferredIndexes(options); } } - return res.then(Mono.fromCallable(() -> { + return withSchedulerCheck(res).then(Mono.fromCallable(() -> { result.setElapsedNanos(System.nanoTime() - start); setSuccess(result); return result.build(); diff --git a/java-fit-performer/src/main/java/com/couchbase/search/SearchHelper.java b/java-fit-performer/src/main/java/com/couchbase/search/SearchHelper.java index d73f2af71..b1b662122 100644 --- a/java-fit-performer/src/main/java/com/couchbase/search/SearchHelper.java +++ b/java-fit-performer/src/main/java/com/couchbase/search/SearchHelper.java @@ -104,6 +104,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; import static com.couchbase.JavaSdkCommandExecutor.convertMutationState; import static com.couchbase.JavaSdkCommandExecutor.setSuccess; import static com.couchbase.client.performer.core.util.TimeUtil.getTimeNow; @@ -688,7 +689,7 @@ public static Mono handleSearchQueryReactive(Cluster cluster, } } - return r.doOnNext(re -> { + return withSchedulerCheck(r).doOnNext(re -> { result.setElapsedNanos(System.nanoTime() - start); var streamer = new ReactiveSearchResultStreamer(re, @@ -774,7 +775,7 @@ public static Mono handleSearchReactive(Cluster cluster, } } - return r.doOnNext(re -> { + return 
withSchedulerCheck(r).doOnNext(re -> { result.setElapsedNanos(System.nanoTime() - start); var streamer = new ReactiveSearchResultStreamer(re, diff --git a/java-fit-performer/src/main/java/com/couchbase/transactions/SingleQueryTransactionExecutor.java b/java-fit-performer/src/main/java/com/couchbase/transactions/SingleQueryTransactionExecutor.java index 065c3ba0c..fea16cc39 100644 --- a/java-fit-performer/src/main/java/com/couchbase/transactions/SingleQueryTransactionExecutor.java +++ b/java-fit-performer/src/main/java/com/couchbase/transactions/SingleQueryTransactionExecutor.java @@ -43,6 +43,7 @@ import com.couchbase.utils.ResultsUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; import java.time.Duration; import java.util.ArrayList; @@ -52,6 +53,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; + /** * Executes single query transactions (tximplicit). */ @@ -106,17 +109,19 @@ private static TransactionSingleQueryResponse reactive(TransactionSingleQueryReq ClusterConnection connection, ConcurrentHashMap spans) { QueryOptions options = setSingleQueryTransactionOptions(request, connection, spans); - ReactiveQueryResult result; + Mono resultMono; if (request.getQuery().hasScope()) { String bucketName = request.getQuery().getScope().getBucketName(); String scopeName = request.getQuery().getScope().getScopeName(); Scope scope = connection.cluster().bucket(bucketName).scope(scopeName); - result = scope.reactive().query(request.getQuery().getStatement(), options).block(); + resultMono = scope.reactive().query(request.getQuery().getStatement(), options); } else { - result = connection.cluster().reactive().query(request.getQuery().getStatement(), options).block(); + resultMono = connection.cluster().reactive().query(request.getQuery().getStatement(), options); } + ReactiveQueryResult result = withSchedulerCheck(resultMono).block(); + AtomicReference errorDuringStreaming = new AtomicReference<>(); AtomicReference causeDuringStreaming = new AtomicReference<>(); AtomicBoolean rowValidationPerformed = new AtomicBoolean(); @@ -225,6 +230,10 @@ private static QueryOptions setSingleQueryTransactionOptions(TransactionSingleQu queryOptions.parentSpan(spans.get(grpcQueryOptions.getParentSpanId())); } + if (grpcQueryOptions.hasClientContextId()) { + queryOptions.clientContextId(grpcQueryOptions.getClientContextId()); + } + if (grpcQueryOptions.hasSingleQueryTransactionOptions()) { com.couchbase.client.protocol.sdk.query.SingleQueryTransactionOptions grpcSingleQueryOptions = grpcQueryOptions.getSingleQueryTransactionOptions(); SingleQueryTransactionOptions singleQueryOptions = SingleQueryTransactionOptions.singleQueryTransactionOptions(); @@ -256,4 +265,4 @@ private static QueryOptions setSingleQueryTransactionOptions(TransactionSingleQu return QueryOptions.queryOptions().asTransaction(); } } -} \ No newline at end of file +} diff --git a/java-fit-performer/src/main/java/com/couchbase/twoway/TransactionOptionsUtil.java b/java-fit-performer/src/main/java/com/couchbase/twoway/TransactionOptionsUtil.java index e2b91d8f7..ace4b7cde 100644 --- a/java-fit-performer/src/main/java/com/couchbase/twoway/TransactionOptionsUtil.java +++ b/java-fit-performer/src/main/java/com/couchbase/twoway/TransactionOptionsUtil.java @@ -22,6 +22,10 @@ import com.couchbase.client.java.transactions.config.TransactionInsertOptions; // [end] import 
com.couchbase.client.protocol.transactions.CommandGet; +// [if:3.7.4] +import com.couchbase.client.java.transactions.config.TransactionGetReplicaFromPreferredServerGroupOptions; +// [end] +import com.couchbase.client.protocol.transactions.CommandGetReplicaFromPreferredServerGroup; import com.couchbase.client.protocol.transactions.CommandInsert; import com.couchbase.client.protocol.transactions.CommandReplace; import com.couchbase.client.protocol.transactions.Replace; @@ -111,4 +115,20 @@ public static TransactionGetOptions transactionGetOptions(Get request) { //? public static Object transactionGetOptions(CommandGet request) { return null; } //? public static Object transactionGetOptions(Get request) { return null; } // [end] + + // [if:3.7.4] + public static TransactionGetReplicaFromPreferredServerGroupOptions transactionGetReplicaFromPreferredServerGroupOptions(CommandGetReplicaFromPreferredServerGroup request) { + TransactionGetReplicaFromPreferredServerGroupOptions options = null; + if (request.hasOptions()) { + options = TransactionGetReplicaFromPreferredServerGroupOptions.transactionGetReplicaFromPreferredServerGroupOptions(); + var opts = request.getOptions(); + if (opts.hasTranscoder()) { + options = options.transcoder(JavaSdkCommandExecutor.convertTranscoder(opts.getTranscoder())); + } + } + return options; + } + // [else] + //? public static Object TransactionGetReplicaFromPreferredServerGroupOptions(CommandGetReplicaFromPreferredServerGroup request) { return null; } + // [end] } diff --git a/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionBlocking.java b/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionBlocking.java index 8ff3c0cfd..f6741a9f8 100644 --- a/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionBlocking.java +++ b/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionBlocking.java @@ -179,14 +179,14 @@ private void performOperation(ClusterConnection connection, request.getDocId().getDocId(), content); } - // [start:3.6.2] + // [if:3.6.2] else { ctx.insert(collection, request.getDocId().getDocId(), content, options); } - // [start:3.6.2] + // [end] }); } else if (op.hasInsertV2()) { var request = op.getInsertV2(); @@ -258,11 +258,18 @@ private void performOperation(ClusterConnection connection, if (!stashedGetMap.containsKey(request.getUseStashedSlot())) { throw new IllegalStateException("Do not have a stashed get in slot " + request.getUseStashedSlot()); } - ctx.replace(stashedGetMap.get(request.getUseStashedSlot()), content); + if (options == null) { + ctx.replace(stashedGetMap.get(request.getUseStashedSlot()), content); + } + // [start:3.6.2] + else { + ctx.replace(stashedGetMap.get(request.getUseStashedSlot()), content, options); + } + // [end:3.6.2] } else { var collection = connection.collection(request.getLocation()); var r = ctx.get(collection, executor.getDocId(request.getLocation())); - if (options != null) { + if (options == null) { ctx.replace(r, content); } // [start:3.6.2] @@ -324,7 +331,7 @@ private void performOperation(ClusterConnection connection, performOperation(dbg + "get " + request.getDocId().getDocId(), ctx, request.getExpectedResultList(), op.getDoNotPropagateError(), performanceMode, () -> { logger.info("{} Performing get operation on {} on bucket {} on collection {}", dbg, request.getDocId().getDocId(),request.getDocId().getBucketName(),request.getDocId().getCollectionName()); - TransactionGetResult out; + TransactionGetResult out = null; if (options == null) { 
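// No per-operation options were supplied, so use the basic get overload here; the overload taking options below is version-gated (see the 3.6.2 markers).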
out = ctx.get(collection, request.getDocId().getDocId()); } @@ -332,7 +339,10 @@ private void performOperation(ClusterConnection connection, else { out = ctx.get(collection, request.getDocId().getDocId(), options); } - handleGetResult(request, out, connection, request.hasContentAsValidation() ? request.getContentAsValidation() : null); + // [end:3.6.2] + if (out != null) { + handleGetResult(request, out, connection, request.hasContentAsValidation() ? request.getContentAsValidation() : null); + } }); } else if (op.hasGetV2()) { var request = op.getGetV2(); @@ -374,6 +384,25 @@ private void performOperation(ClusterConnection connection, } handleGetOptionalResult(request, req, out, connection, request.hasContentAsValidation() ? request.getContentAsValidation() : null); }); + // [if:3.7.4] + } else if (op.hasGetFromPreferredServerGroup()) { + var request = op.getGetFromPreferredServerGroup(); + var collection = connection.collection(request.getDocId()); + var options = TransactionOptionsUtil.transactionGetReplicaFromPreferredServerGroupOptions(request); + + performOperation(dbg + "getFromPreferredServerGroup " + request.getDocId().getDocId(), ctx, request.getExpectedResultList(), op.getDoNotPropagateError(), performanceMode, + () -> { + logger.info("{} Performing getFromPreferredServerGroup operation on {} on bucket {} on collection {}", dbg, request.getDocId().getDocId(),request.getDocId().getBucketName(),request.getDocId().getCollectionName()); + TransactionGetResult out; + if (options == null) { + out = ctx.getReplicaFromPreferredServerGroup(collection, request.getDocId().getDocId()); + } + else { + out = ctx.getReplicaFromPreferredServerGroup(collection, request.getDocId().getDocId(), options); + } + handleGetReplicaFromPreferredServerGroupResult(request, out, request.hasContentAsValidation() ? request.getContentAsValidation() : null); + }); + // [end] } else if (op.hasWaitOnLatch()) { final CommandWaitOnLatch request = op.getWaitOnLatch(); final String latchName = request.getLatchName(); diff --git a/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionReactive.java b/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionReactive.java index 9f7490dec..9e96f5d30 100644 --- a/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionReactive.java +++ b/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionReactive.java @@ -67,6 +67,8 @@ import java.util.function.Function; import java.util.function.Supplier; +import static com.couchbase.utils.UserSchedulerUtil.withSchedulerCheck; + /** * Version of TwoWayTransaction that uses the reactive API. 
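 *
 * Reactive SDK calls made by each operation are wrapped with
 * {@code UserSchedulerUtil.withSchedulerCheck} so the performer can assert that results
 * are observed on the custom user scheduler.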
*/ @@ -268,6 +270,25 @@ private Mono performOperation(ClusterConnection connection, }) .then(); }); + // [if:3.7.4] + } else if (op.hasGetFromPreferredServerGroup()) { + var request = op.getGetFromPreferredServerGroup(); + var collection = connection.collection(request.getDocId()); + var options = TransactionOptionsUtil.transactionGetReplicaFromPreferredServerGroupOptions(request); + + return performOperation(waitIfNeeded, dbg + "getFromPreferredServerGroup " + request.getDocId().getDocId(), ctx, request.getExpectedResultList(), op.getDoNotPropagateError(), performanceMode, + () -> { + logger.info("Performing getFromPreferredServerGroup operation on {} on bucket {} on collection {}", request.getDocId().getDocId(), request.getDocId().getBucketName(), request.getDocId().getCollectionName()); + return Mono.defer(() -> { + if (options != null) { + return ctx.getReplicaFromPreferredServerGroup(collection.reactive(), request.getDocId().getDocId(), options); + } + return ctx.getReplicaFromPreferredServerGroup(collection.reactive(), request.getDocId().getDocId()); + }) + .doOnNext(out -> handleGetReplicaFromPreferredServerGroupResult(request, out, request.hasContentAsValidation() ? request.getContentAsValidation() : null)) + .then(); + }); + // [end] } else if (op.hasWaitOnLatch()) { final CommandWaitOnLatch request = op.getWaitOnLatch(); final String latchName = request.getLatchName(); @@ -381,7 +402,7 @@ private Mono performOperation(Mono preOp, } return preOp - .then(op.get()) + .then(withSchedulerCheck(op.get())) .then(Mono.defer(() -> { if (!performanceMode) { logger.info("Took {} millis to run command '{}'", System.currentTimeMillis() - now, opDebug); diff --git a/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionShared.java b/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionShared.java index ec72fc7ae..6152bc182 100644 --- a/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionShared.java +++ b/java-fit-performer/src/main/java/com/couchbase/twoway/TwoWayTransactionShared.java @@ -42,6 +42,7 @@ import com.couchbase.client.protocol.transactions.CommandBatch; import com.couchbase.client.protocol.transactions.CommandGet; import com.couchbase.client.protocol.transactions.CommandGetOptional; +import com.couchbase.client.protocol.transactions.CommandGetReplicaFromPreferredServerGroup; import com.couchbase.client.protocol.transactions.CommandSetLatch; import com.couchbase.client.protocol.transactions.CommandWaitOnLatch; import com.couchbase.client.protocol.shared.ContentAsPerformerValidation; @@ -308,14 +309,48 @@ protected void handleGetOptionalResult(CommandGet request, } protected void handleGetResult(CommandGet request, TransactionGetResult getResult, ClusterConnection cc, @Nullable ContentAsPerformerValidation contentAs) { - stashedGet.set(getResult); + if (request.hasStashInSlot()) { + stashedGetMap.put(request.getStashInSlot(), getResult); + logger.info("Stashed {} in slot {}", getResult.id(), request.getStashInSlot()); + // stashedGetMap.forEach((k, v) -> logger.info("Stash: {}={}", k, v)); + } + + handleGetResultInternal(getResult, contentAs); + + if (!request.getExpectedContentJson().isEmpty()) { + JsonObject expected = JsonObject.fromJson(request.getExpectedContentJson()); + // Due to JsonValueSerializerWrapper, can't use contentAsObject() as that uses Jackson directly rather + // than the wrapped CustomSerializer. CustomSerializer always returns a JsonObject, so we request a String + // and cast to that. 
Unfortunately we can't always do this, as with the default Json serializer contentAsObject() + // throws if we ask for String.class. + JsonObject actual; + if (cc.cluster().environment().jsonSerializer() instanceof JsonValueSerializerWrapper) { + actual = (JsonObject) (Object) getResult.contentAs(String.class); + } + else { + actual = getResult.contentAsObject(); + } + + if (!expected.equals(actual)) { + logger.warn("Expected content {}, got content {}", expected, actual); + throw new TestFailure(new IllegalStateException("Did not get expected content")); + } + } + } + protected void handleGetReplicaFromPreferredServerGroupResult(CommandGetReplicaFromPreferredServerGroup request, TransactionGetResult getResult, @Nullable ContentAsPerformerValidation contentAs) { if (request.hasStashInSlot()) { stashedGetMap.put(request.getStashInSlot(), getResult); logger.info("Stashed {} in slot {}", getResult.id(), request.getStashInSlot()); // stashedGetMap.forEach((k, v) -> logger.info("Stash: {}={}", k, v)); } + handleGetResultInternal(getResult, contentAs); + } + + protected void handleGetResultInternal(TransactionGetResult getResult, @Nullable ContentAsPerformerValidation contentAs) { + stashedGet.set(getResult); + if (contentAs != null) { var content = ContentAsUtil.contentType( contentAs.getContentAs(), @@ -338,26 +373,6 @@ protected void handleGetResult(CommandGet request, TransactionGetResult getResul } } } - - if (!request.getExpectedContentJson().isEmpty()) { - JsonObject expected = JsonObject.fromJson(request.getExpectedContentJson()); - // Due to JsonValueSerializerWrapper, can't use contentAsObject() as that uses Jackson directly rather - // than the wrapped CustomSerializer. CustomSerializer always returns a JsonObject, so we request a String - // and cast to that. Unfortunately we can't always do this, as with the default Json serializer contentAsObject() - // throws if we ask for String.class. 
- JsonObject actual; - if (cc.cluster().environment().jsonSerializer() instanceof JsonValueSerializerWrapper) { - actual = (JsonObject) (Object) getResult.contentAs(String.class); - } - else { - actual = getResult.contentAsObject(); - } - - if (!expected.equals(actual)) { - logger.warn("Expected content {}, got content {}", expected, actual); - throw new TestFailure(new IllegalStateException("Did not get expected content")); - } - } } protected void handleWaitOnLatch(CommandWaitOnLatch request, CoreTransactionLogger txnLogger) { diff --git a/java-fit-performer/src/main/java/com/couchbase/utils/Capabilities.java b/java-fit-performer/src/main/java/com/couchbase/utils/Capabilities.java index cf0ac9ac4..3710a8d60 100644 --- a/java-fit-performer/src/main/java/com/couchbase/utils/Capabilities.java +++ b/java-fit-performer/src/main/java/com/couchbase/utils/Capabilities.java @@ -84,6 +84,14 @@ public static List sdkImplementationCaps() { out.add(Caps.SDK_VECTOR_SEARCH_BASE64); // [end] + // [if:3.7.4] + out.add(Caps.SDK_ZONE_AWARE_READ_FROM_REPLICA); + // [end] + + // [if:3.7.6] + out.add(Caps.SDK_OBSERVABILITY_CLUSTER_LABELS); + // [end] + return out; } } diff --git a/java-fit-performer/src/main/java/com/couchbase/utils/ContentAsUtil.java b/java-fit-performer/src/main/java/com/couchbase/utils/ContentAsUtil.java index b38a6bf8f..ca52754c7 100644 --- a/java-fit-performer/src/main/java/com/couchbase/utils/ContentAsUtil.java +++ b/java-fit-performer/src/main/java/com/couchbase/utils/ContentAsUtil.java @@ -20,8 +20,8 @@ import com.couchbase.client.protocol.shared.ContentAs; import com.couchbase.client.protocol.shared.ContentTypes; import com.google.protobuf.ByteString; +import reactor.util.annotation.Nullable; -import java.util.List; import java.util.function.Supplier; public class ContentAsUtil { @@ -91,62 +91,38 @@ public static Try contentType(ContentAs contentAs, } } - public static Try> contentTypeList(ContentAs contentAs, - Supplier> asByteArray, - Supplier> asString, - Supplier> asJsonObject, - Supplier> asJsonArray, - Supplier> asBoolean, - Supplier> asInteger, - Supplier> asDouble) { - try { - if (contentAs.hasAsByteArray()) { - return new Try<>(asByteArray.get().stream() - .map(v -> v != null - ? ContentTypes.newBuilder().setContentAsBytes(ByteString.copyFrom(v)).build() - : getNullContentType().value()) - .toList()); - } else if (contentAs.hasAsString()) { - return new Try<>(asString.get().stream() - .map(v -> v != null - ? ContentTypes.newBuilder().setContentAsString(v).build() - : getNullContentType().value()).toList()); - } else if (contentAs.hasAsJsonObject()) { - return new Try<>(asJsonObject.get().stream() - .map(v -> v != null - ? ContentTypes.newBuilder().setContentAsBytes(ByteString.copyFrom(v.toBytes())).build() - : getNullContentType().value()) - .toList()); - } else if (contentAs.hasAsJsonArray()) { - return new Try<>(asJsonArray.get().stream() - .map(v -> v != null - ? ContentTypes.newBuilder().setContentAsBytes(ByteString.copyFrom(v.toBytes())).build() - : getNullContentType().value()) - .toList()); - } else if (contentAs.getAsBoolean()) { - return new Try<>(asBoolean.get().stream() - .map(v -> v != null - ? ContentTypes.newBuilder().setContentAsBool(v).build() - : getNullContentType().value()) - .toList()); - } else if (contentAs.hasAsInteger()) { - return new Try<>(asInteger.get().stream() - .map(v -> v != null - ? 
ContentTypes.newBuilder().setContentAsInt64(v).build() - : getNullContentType().value()) - .toList()); - } else if (contentAs.hasAsFloatingPoint()) { - return new Try<>(asDouble.get().stream() - .map(v -> v != null - ?ContentTypes.newBuilder().setContentAsDouble(v).build() - : getNullContentType().value()) - .toList()); - } else { - throw new UnsupportedOperationException("Java performer cannot handle contentAs " + contentAs.toString()); - } - } catch (RuntimeException err) { - return new Try<>(err); + public static Class toJavaClass(ContentAs contentAs) { + return switch (contentAs.getAsCase()) { + case AS_STRING -> String.class; + case AS_BYTE_ARRAY -> byte[].class; + case AS_JSON_OBJECT -> JsonObject.class; + case AS_JSON_ARRAY -> JsonArray.class; + case AS_BOOLEAN -> Boolean.class; + case AS_INTEGER -> Integer.class; + case AS_FLOATING_POINT -> Double.class; + + default -> throw new UnsupportedOperationException("Java performer cannot handle contentAs " + contentAs); + }; + } + + public static ContentTypes toFitContent(@Nullable Object value, ContentAs contentAs) { + ContentTypes.Builder builder = ContentTypes.newBuilder(); + + if (value == null) return builder.setContentAsNull(ContentTypes.NullValue.getDefaultInstance()).build(); + + switch (contentAs.getAsCase()) { + case AS_STRING -> builder.setContentAsString((String) value); + case AS_BYTE_ARRAY -> builder.setContentAsBytes(ByteString.copyFrom((byte[]) value)); + case AS_JSON_OBJECT -> builder.setContentAsBytes(ByteString.copyFrom(((JsonObject) value).toBytes())); + case AS_JSON_ARRAY -> builder.setContentAsBytes(ByteString.copyFrom(((JsonArray) value).toBytes())); + case AS_BOOLEAN -> builder.setContentAsBool((Boolean) value); + case AS_INTEGER -> builder.setContentAsInt64((Integer) value); + case AS_FLOATING_POINT -> builder.setContentAsDouble((Double) value); + + default -> throw new UnsupportedOperationException("Java performer cannot handle contentAs " + contentAs); } + + return builder.build(); } public static byte[] convert(ContentTypes content) { diff --git a/java-fit-performer/src/main/java/com/couchbase/utils/CustomJsonSerializer.java b/java-fit-performer/src/main/java/com/couchbase/utils/CustomJsonSerializer.java index 2f04e5e15..12c4abbee 100644 --- a/java-fit-performer/src/main/java/com/couchbase/utils/CustomJsonSerializer.java +++ b/java-fit-performer/src/main/java/com/couchbase/utils/CustomJsonSerializer.java @@ -1,24 +1,18 @@ -/** - * CustomJsonSerializer provides a generic implementation of the JsonSerializer interface. - - * This serializer is designed to handle the conversion of Java objects to JSON format - * and back, with an additional boolean flag (`Serialized`) that indicates whether - * the object has been serialized. The flag is included in the JSON payload, making - * it easy to track the serialization state of objects. - - * Use Cases: - * - This serializer can be used in scenarios where you need to serialize and deserialize - * objects while keeping track of their serialization state. - - * Limitations: - * - The current implementation assumes that the input objects can be serialized into - * a JSON format using Jackson's ObjectMapper. Complex or non-standard objects may - * require additional handling. - * - The `deserialize` methods in this implementation modify the original JSON object - * by setting the `Serialized` flag to `false`, which might not be suitable for - * all use cases. +/* + * Copyright 2024 Couchbase, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package com.couchbase.utils; import com.couchbase.client.core.deps.com.fasterxml.jackson.core.JsonProcessingException; @@ -28,7 +22,30 @@ import com.couchbase.client.java.codec.TypeRef; import com.couchbase.client.java.json.JsonObject; - +/** + * CustomJsonSerializer provides a generic implementation of the JsonSerializer interface. + *
+ * This serializer is designed to handle the conversion of Java objects to JSON format
+ * and back, with an additional boolean flag (`Serialized`) that indicates whether
+ * the object has been serialized. The flag is included in the JSON payload, making
+ * it easy to track the serialization state of objects.
+ *
+ * Use Cases:
+ * <ul>
+ *   <li>This serializer can be used in scenarios where you need to serialize and deserialize
+ *   objects while keeping track of their serialization state.</li>
+ * </ul>
+ *
+ * Limitations:
+ * <ul>
+ *   <li>The current implementation assumes that the input objects can be serialized into
+ *   a JSON format using Jackson's ObjectMapper. Complex or non-standard objects may
+ *   require additional handling.</li>
+ *   <li>The `deserialize` methods in this implementation modify the original JSON object
+ *   by setting the `Serialized` flag to `false`, which might not be suitable for
+ *   all use cases.</li>
+ * </ul>
+ */ public class CustomJsonSerializer implements JsonSerializer { @Override public byte[] serialize(Object input) { diff --git a/java-fit-performer/src/main/java/com/couchbase/utils/OptionsUtil.java b/java-fit-performer/src/main/java/com/couchbase/utils/OptionsUtil.java index 52a3e772a..e462a9c1e 100644 --- a/java-fit-performer/src/main/java/com/couchbase/utils/OptionsUtil.java +++ b/java-fit-performer/src/main/java/com/couchbase/utils/OptionsUtil.java @@ -92,15 +92,14 @@ public class OptionsUtil { private OptionsUtil() {} - @Nullable public static + public static ClusterEnvironment.Builder convertClusterConfig(ClusterConnectionCreateRequest request, Supplier getCluster, ArrayList onClusterConnectionClose) { - ClusterEnvironment.Builder clusterEnvironment = null; + ClusterEnvironment.Builder clusterEnvironment = ClusterEnvironment.builder(); if (request.hasClusterConfig()) { var cc = request.getClusterConfig(); - clusterEnvironment = ClusterEnvironment.builder(); if (cc.getUseCustomSerializer()) { clusterEnvironment.jsonSerializer(new CustomSerializer()); @@ -154,6 +153,12 @@ ClusterEnvironment.Builder convertClusterConfig(ClusterConnectionCreateRequest r if (cc.hasObservabilityConfig()) { applyObservabilityConfig(clusterEnvironment, cc, onClusterConnectionClose); } + + if (cc.hasPreferredServerGroup()) { + // [if:3.7.4] + clusterEnvironment.preferredServerGroup(cc.getPreferredServerGroup()); + // [end] + } } return clusterEnvironment; @@ -603,6 +608,10 @@ public static com.couchbase.client.java.transactions.TransactionQueryOptions tra if (qo.hasScanWaitMillis()) { queryOptions.scanWait(Duration.ofMillis(qo.getScanWaitMillis())); } + + if (qo.hasClientContextId()) { + queryOptions.clientContextId(qo.getClientContextId()); + } } return queryOptions; } diff --git a/java-fit-performer/src/main/java/com/couchbase/utils/ResultsUtil.java b/java-fit-performer/src/main/java/com/couchbase/utils/ResultsUtil.java index 4549c7fff..a324dd802 100644 --- a/java-fit-performer/src/main/java/com/couchbase/utils/ResultsUtil.java +++ b/java-fit-performer/src/main/java/com/couchbase/utils/ResultsUtil.java @@ -206,6 +206,9 @@ else if (ex instanceof AmbiguousTimeoutException) { else if (ex instanceof AuthenticationFailureException) { return ExternalException.AuthenticationFailureException; } + else if (ex instanceof com.couchbase.client.core.error.DocumentUnretrievableException) { + return ExternalException.DocumentUnretrievableException; + } else if (ex instanceof com.couchbase.client.core.error.CouchbaseException) { return ExternalException.CouchbaseException; } diff --git a/java-fit-performer/src/main/java/com/couchbase/utils/UserSchedulerUtil.java b/java-fit-performer/src/main/java/com/couchbase/utils/UserSchedulerUtil.java new file mode 100644 index 000000000..9a9c0f900 --- /dev/null +++ b/java-fit-performer/src/main/java/com/couchbase/utils/UserSchedulerUtil.java @@ -0,0 +1,94 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.utils; + +import com.couchbase.InternalPerformerFailure; +import com.couchbase.client.core.deps.io.netty.util.concurrent.DefaultThreadFactory; +import com.google.common.base.Throwables; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.Disposable; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import javax.annotation.Nullable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class UserSchedulerUtil { + private UserSchedulerUtil() { + } + + private static final Logger logger = LoggerFactory.getLogger(UserSchedulerUtil.class); + + private static final String USER_SCHEDULER_THREAD_POOL_NAME = "custom-user-scheduler"; + + public record ExecutorAndScheduler(ExecutorService executorService, Scheduler scheduler) implements Disposable { + @Override + public void dispose() { + scheduler.dispose(); + executorService.shutdownNow(); + } + } + + public static ExecutorAndScheduler userExecutorAndScheduler() { + var executorService = Executors.newCachedThreadPool(new DefaultThreadFactory(USER_SCHEDULER_THREAD_POOL_NAME, true)); + var userScheduler = Schedulers.fromExecutor(executorService); + return new ExecutorAndScheduler(executorService, userScheduler); + } + + // Getting stack traces is expensive; skip it in case this is a performance test. + private static final boolean CAPTURE_STACK_TRACE = false; + + public static Mono withSchedulerCheck(Mono publisher) { + Exception location = CAPTURE_STACK_TRACE ? new Exception("Scheduler check was applied here") : null; + return publisher + .doOnNext(it -> assertInCustomUserSchedulerThread("onNext", location)) + .doOnError(it -> assertInCustomUserSchedulerThread("onError", location)) + .doOnSuccess(it -> assertInCustomUserSchedulerThread("onSuccess", location)); + } + + public static Flux withSchedulerCheck(Flux publisher) { + Exception location = CAPTURE_STACK_TRACE ? new Exception("Scheduler check was applied here") : null; + return publisher + .doOnNext(it -> assertInCustomUserSchedulerThread("onNext", location)) + .doOnError(it -> assertInCustomUserSchedulerThread("onError", location)) + .doOnComplete(() -> assertInCustomUserSchedulerThread("onComplete", location)); + } + + private static void assertInCustomUserSchedulerThread( + String hookType, + @Nullable Exception locationWhereSchedulerCheckWasApplied + ) { + // [if:3.7.5] first version that allows specifying custom publishOn scheduler + String threadName = Thread.currentThread().getName(); + boolean isUserThread = threadName.contains(USER_SCHEDULER_THREAD_POOL_NAME); + + if (!isUserThread) { + String location = locationWhereSchedulerCheckWasApplied == null + ? "To discover the location of the failed scheduler check, set CAPTURE_STACK_TRACE to true in performer source code." 
+ : Throwables.getStackTraceAsString(locationWhereSchedulerCheckWasApplied); + + String msg = "Expected reactive " + hookType + " handler to run in custom user scheduler thread, but thread name was: " + threadName + " ; location = " + location; + logger.error(msg); + throw new InternalPerformerFailure(new RuntimeException(msg)); + } + // [end] + } +} diff --git a/kotlin-client/pom.xml b/kotlin-client/pom.xml index c59243718..438743971 100644 --- a/kotlin-client/pom.xml +++ b/kotlin-client/pom.xml @@ -7,11 +7,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 kotlin-client - 1.4.3 + 1.4.6 Couchbase Kotlin SDK The official Couchbase Kotlin SDK @@ -30,6 +30,12 @@ + + + com.couchbase.client + core-io + + org.jetbrains.kotlin @@ -51,6 +57,11 @@ kotlinx-coroutines-reactive ${kotlin.coroutines.version} + + org.jetbrains.kotlinx + kotlinx-coroutines-reactor + ${kotlin.coroutines.version} + @@ -85,12 +96,6 @@ true - - - com.couchbase.client - core-io - - com.couchbase.client diff --git a/kotlin-client/src/integrationTest/resources/integration.properties b/kotlin-client/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/kotlin-client/src/integrationTest/resources/integration.properties +++ b/kotlin-client/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Cluster.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Cluster.kt index 4821ce3c5..dba994bef 100644 --- a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Cluster.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Cluster.kt @@ -39,6 +39,7 @@ import com.couchbase.client.kotlin.analytics.AnalyticsPriority import com.couchbase.client.kotlin.analytics.AnalyticsScanConsistency import com.couchbase.client.kotlin.analytics.internal.AnalyticsExecutor import com.couchbase.client.kotlin.annotations.UncommittedCouchbaseApi +import com.couchbase.client.kotlin.annotations.VolatileCouchbaseApi import com.couchbase.client.kotlin.codec.JsonSerializer import com.couchbase.client.kotlin.diagnostics.DiagnosticsResult import com.couchbase.client.kotlin.diagnostics.PingResult @@ -71,6 +72,7 @@ import com.couchbase.client.kotlin.search.SearchRow import com.couchbase.client.kotlin.search.SearchScanConsistency import com.couchbase.client.kotlin.search.SearchSort import com.couchbase.client.kotlin.search.SearchSpec +import com.couchbase.client.kotlin.transactions.Transactions import kotlinx.coroutines.flow.Flow import kotlinx.coroutines.future.await import kotlinx.coroutines.reactive.awaitSingle @@ -186,6 +188,15 @@ public class Cluster internal constructor( public val httpClient: CouchbaseHttpClient get() = CouchbaseHttpClient(this) + /** + * A runner for transactional operations. + * + * @sample com.couchbase.client.kotlin.samples.simpleTransactionExample + */ + @VolatileCouchbaseApi + public val transactions: Transactions + get() = Transactions(core) + /** * A manager for administering buckets (create, update, drop, flush, list, etc.) 
*/ diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Collection.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Collection.kt index 3d710c4eb..189eb60cd 100644 --- a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Collection.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Collection.kt @@ -20,6 +20,7 @@ import com.couchbase.client.core.CoreKeyspace import com.couchbase.client.core.annotation.SinceCouchbase import com.couchbase.client.core.api.CoreCouchbaseOps import com.couchbase.client.core.api.kv.CoreAsyncResponse +import com.couchbase.client.core.api.kv.CoreReadPreference import com.couchbase.client.core.api.shared.CoreMutationState import com.couchbase.client.core.endpoint.http.CoreCommonOptions import com.couchbase.client.core.error.CasMismatchException @@ -335,7 +336,7 @@ public class Collection internal constructor( id: String, common: CommonOptions = CommonOptions.Default, ): Flow { - return kvOps.getAllReplicasReactive(common.toCore(), id) + return kvOps.getAllReplicasReactive(common.toCore(), id, CoreReadPreference.NO_PREFERENCE) .asFlow().map { GetReplicaResult(id, it, defaultTranscoder) } } @@ -363,7 +364,7 @@ public class Collection internal constructor( id: String, common: CommonOptions = CommonOptions.Default, ): GetReplicaResult? { - val response = kvOps.getAnyReplicaReactive(common.toCore(), id) + val response = kvOps.getAnyReplicaReactive(common.toCore(), id, CoreReadPreference.NO_PREFERENCE) .awaitFirstOrNull() ?: return null return GetReplicaResult(id, response, defaultTranscoder) @@ -643,7 +644,8 @@ public class Collection internal constructor( val coreResult = kvOps.subdocGetAnyReplicaReactive( common.toCore(), id, - spec.commands + spec.commands, + CoreReadPreference.NO_PREFERENCE, ).awaitFirst() return LookupInReplicaResult(coreResult, defaultJsonSerializer, spec) @@ -663,6 +665,7 @@ public class Collection internal constructor( common.toCore(), id, spec.commands, + CoreReadPreference.NO_PREFERENCE, ) return flux .map { coreResult -> LookupInReplicaResult(coreResult, defaultJsonSerializer, spec) } diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Scope.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Scope.kt index b7ab9f8f6..c159d056a 100644 --- a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Scope.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/Scope.kt @@ -71,13 +71,14 @@ import kotlinx.coroutines.flow.flow import kotlinx.coroutines.flow.map import kotlinx.coroutines.reactive.asFlow import kotlinx.coroutines.reactive.awaitSingle -import java.util.* +import java.util.UUID import java.util.concurrent.ConcurrentHashMap public class Scope( public val name: String, public val bucket: Bucket, ) { + internal val queryContext = CoreQueryContext.of(bucket.name, name) internal val couchbaseOps: CoreCouchbaseOps = bucket.couchbaseOps private val searchOps = couchbaseOps.searchOps(CoreBucketAndScope(bucket.name, name)) @@ -85,7 +86,7 @@ public class Scope( private val queryExecutor = QueryExecutor( couchbaseOps.queryOps(), - CoreQueryContext.of(bucket.name, name), + queryContext, bucket.env.jsonSerializer, ) diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/ClusterEnvironmentDslBuilder.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/ClusterEnvironmentDslBuilder.kt index 8e03115cf..ba0c723b4 100644 --- 
a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/ClusterEnvironmentDslBuilder.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/ClusterEnvironmentDslBuilder.kt @@ -124,6 +124,14 @@ public class ClusterEnvironmentDslBuilder { loggingMeterConfigDslBuilder.initializer() } + private var transactionsConfigDslBuilder = TransactionsConfigDslBuilder() + @VolatileCouchbaseApi + public fun transactions(initializer: TransactionsConfigDslBuilder.() -> Unit) { + transactionsConfigDslBuilder.initializer() + + wrapped.transactionsConfig(transactionsConfigDslBuilder.toCore()) + } + /** * @see CoreEnvironment.Builder.eventBus */ diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/TransactionsCleanupConfigDslBuilder.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/TransactionsCleanupConfigDslBuilder.kt new file mode 100644 index 000000000..48f606c85 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/TransactionsCleanupConfigDslBuilder.kt @@ -0,0 +1,75 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.env.dsl + +import com.couchbase.client.kotlin.Keyspace +import kotlin.time.Duration +import kotlin.time.Duration.Companion.minutes + +@ClusterEnvironmentDslMarker +public class TransactionsCleanupConfigDslBuilder { + /** + * Controls whether a background thread is created to clean up any transaction attempts made by this client. + * + * Defaults to true. + * + * This should typically be left at the default value. + * + * If false, this client's transactions will only be cleaned up + * by the lost attempts cleanup process, which is by necessity slower. + */ + public var cleanupClientAttempts: Boolean = true + + /** + * Controls whether a background process is created to clean up any 'lost' transaction attempts. + * + * Defaults to true. + * + * This should typically be left at the default value, because cleanup is an essential part of Couchbase + * transactions. + */ + public var cleanupLostAttempts: Boolean = true + + + /** + * Determines how frequently the SDK scans for 'lost' transaction attempts + * when [cleanupLostAttempts] is true. + * + * Defaults to 1 minute. + * + * The default value balances promptness of lost transaction discovery + * against the cost of doing the scan, and is suitable for most environments. + * + * An application that prefers to discover lost transactions sooner may reduce this value + * at the cost of increased resource usage on client and server. + */ + public var cleanupWindow: Duration = 1.minutes + + /** + * The initial set of transaction metadata collections to clean up. + * Has no effect if transaction cleanup is explicitly disabled. + * + * Defaults to an empty set. 
+ * + * Specifying at least one collection has the side effect of starting cleanup immediately, + * instead of deferring cleanup until the first transaction starts. + * + * Note that [TransactionsConfigDslBuilder.metadataCollection] does not need to be specified here, + * since it's always cleaned up (unless transaction cleanup is explicitly disabled). + */ + public var collections: Set = emptySet() +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/TransactionsConfigDslBuilder.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/TransactionsConfigDslBuilder.kt new file mode 100644 index 000000000..6f45fe8e8 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/env/dsl/TransactionsConfigDslBuilder.kt @@ -0,0 +1,113 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.env.dsl + +import com.couchbase.client.core.transaction.atr.ActiveTransactionRecordIds +import com.couchbase.client.core.transaction.config.CoreTransactionsCleanupConfig +import com.couchbase.client.core.transaction.config.CoreTransactionsConfig +import com.couchbase.client.core.transaction.forwards.CoreTransactionsSupportedExtensions +import com.couchbase.client.kotlin.Keyspace +import com.couchbase.client.kotlin.internal.toOptional +import com.couchbase.client.kotlin.kv.Durability +import com.couchbase.client.kotlin.transactions.toCollectionIdentifier +import java.util.Optional +import kotlin.time.Duration +import kotlin.time.Duration.Companion.seconds +import kotlin.time.toJavaDuration + + +internal fun checkTransactionDurability(value: Durability?) { + require(value !is Durability.ClientVerified) { "Transaction durability must not be ClientVerified." } + require(value !is Durability.None && value !is Durability.Disabled) { "Transaction durability must not be None." } +} + +@ClusterEnvironmentDslMarker +public class TransactionsConfigDslBuilder { + /** + * Default durability level for all writes in transactions. + * + * The initial value is [Durability.majority], meaning a transaction will pause on each write + * until it is available in-memory on a majority of configured replicas. + * + * Must not be [Durability.none] or [Durability.clientVerified], which are not compatible with transactions. + */ + public var durabilityLevel: Durability = Durability.majority() + set(value) { + checkTransactionDurability(value) + field = value + } + + /** + * Default maximum time a transaction can run for. + * + * The initial value is 15 seconds. + * + * After this time, the transaction will abort. Note that this could be mid-commit, in which case the cleanup process + * will complete the transaction asynchronously at a later point. + * + * Applications can increase or decrease this as desired. 
The trade-off to understand is that documents + * being mutated in a transaction `A` are effectively locked from being updated by other transactions until + * transaction `A` has completed (committed or rolled back). If transaction `A` is unable to complete for whatever + * reason, the document can be locked for this [timeout] time. + */ + public var timeout: Duration = 15.seconds + set(value) { + require(value.inWholeMilliseconds > 0) { "Transaction timeout must be at least 1 millisecond, but got $value" } + field = value + } + + /** + * Default collection for storing transaction metadata documents. + * + * The initial value is null. If null, the documents are stored in the default collection + * of the bucket containing the first mutated document in the transaction. + * + * This collection is automatically added to the set of collections to clean up. + */ + public var metadataCollection: Keyspace? = null + + /** + * Configure options related to transaction cleanup. + */ + private val cleanupConfigDlsBuilder = TransactionsCleanupConfigDslBuilder() + public fun cleanup(initializer: TransactionsCleanupConfigDslBuilder.() -> Unit) { + cleanupConfigDlsBuilder.initializer() + } + + internal fun toCore(): CoreTransactionsConfig { + return CoreTransactionsConfig( + durabilityLevel.toCore().levelIfSynchronous().orElseThrow { NoSuchElementException() }, + timeout.toJavaDuration(), + with(cleanupConfigDlsBuilder) { + CoreTransactionsCleanupConfig( + cleanupLostAttempts, + cleanupClientAttempts, + cleanupWindow.toJavaDuration(), + collections.map { it.toCollectionIdentifier() }.toSet() + ) + }, + null, // txn attempt context factory + null, // cleaner factory + null, // client record factory + ActiveTransactionRecordIds.NUM_ATRS_DEFAULT, + metadataCollection?.toCollectionIdentifier().toOptional(), + Optional.empty(), // For Kotlin, scan consistency is specified on each query request. + CoreTransactionsSupportedExtensions.ALL, + ) + } +} + diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/QueryScanConsistency.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/QueryScanConsistency.kt index 559b08aaf..762191a75 100644 --- a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/QueryScanConsistency.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/QueryScanConsistency.kt @@ -17,29 +17,21 @@ package com.couchbase.client.kotlin.query import com.couchbase.client.core.api.shared.CoreMutationState -import com.couchbase.client.core.util.Golang import com.couchbase.client.kotlin.internal.isEmpty import com.couchbase.client.kotlin.kv.MutationState import com.couchbase.client.kotlin.query.QueryScanConsistency.Companion.consistentWith import com.couchbase.client.kotlin.query.QueryScanConsistency.Companion.notBounded import com.couchbase.client.kotlin.query.QueryScanConsistency.Companion.requestPlus import kotlin.time.Duration -import kotlin.time.toJavaDuration /** * Create instances using the [requestPlus], [consistentWith], or [notBounded] * factory methods. */ public sealed class QueryScanConsistency( - private val wireName: String?, internal val scanWait: Duration?, ) { - internal open fun inject(queryJson: MutableMap): Unit { - wireName?.let { queryJson["scan_consistency"] = it } - scanWait?.let { queryJson["scan_wait"] = Golang.encodeDurationToMs(it.toJavaDuration()) } - } - public companion object { /** * For when speed matters more than consistency. 
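[Editor's note] The new `transactions { }` block on `ClusterEnvironmentDslBuilder` ties the two builders above into the environment DSL. As a rough, non-authoritative sketch of how an application might configure it: the `ClusterEnvironment.builder` entry point, the `Keyspace` constructor arguments, and all bucket/collection names are assumptions rather than part of this patch, and an opt-in for the volatile API annotation may additionally be required.

import com.couchbase.client.kotlin.Keyspace
import com.couchbase.client.kotlin.env.ClusterEnvironment
import com.couchbase.client.kotlin.kv.Durability
import kotlin.time.Duration.Companion.seconds

val env = ClusterEnvironment.builder {
    transactions {
        // Transactional writes wait for majority durability plus persistence on the active node.
        durabilityLevel = Durability.majorityAndPersistToActive()
        // Abort transactions that take longer than 30 seconds.
        timeout = 30.seconds
        // Hypothetical keyspace for transaction metadata documents.
        metadataCollection = Keyspace("travel-sample", "_default", "txn-meta")
        cleanup {
            // Scan for lost attempts more often than the 1-minute default.
            cleanupWindow = 30.seconds
        }
    }
}.build()
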
Executes the query @@ -70,20 +62,15 @@ public sealed class QueryScanConsistency( if (tokens.isEmpty()) NotBounded else ConsistentWith(tokens, scanWait) } - internal object NotBounded : QueryScanConsistency(null, null) + internal object NotBounded : QueryScanConsistency(null) internal class RequestPlus internal constructor(scanWait: Duration? = null) : - QueryScanConsistency("request_plus", scanWait) + QueryScanConsistency(scanWait) internal class ConsistentWith internal constructor( private val tokens: MutationState, scanWait: Duration? = null, - ) : QueryScanConsistency("at_plus", scanWait) { - override fun inject(queryJson: MutableMap): Unit { - super.inject(queryJson) - queryJson["scan_vectors"] = tokens.export() - } - + ) : QueryScanConsistency(scanWait) { fun toCore(): CoreMutationState = CoreMutationState(tokens) } } diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/internal/QueryExecutor.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/internal/QueryExecutor.kt index ee56ffbe3..8810c101d 100644 --- a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/internal/QueryExecutor.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/query/internal/QueryExecutor.kt @@ -77,56 +77,25 @@ internal class QueryExecutor( ): Flow { val actualSerializer = serializer ?: defaultSerializer - val coreQueryOpts = object : CoreQueryOptions { - override fun adhoc(): Boolean = adhoc - override fun clientContextId(): String? = clientContextId - override fun consistentWith(): CoreMutationState? = (consistency as? QueryScanConsistency.ConsistentWith)?.toCore() - override fun maxParallelism(): Int? = maxParallelism - override fun metrics(): Boolean = metrics - - override fun namedParameters(): ObjectNode? = (parameters as? QueryParameters.Named)?.let { params -> - // Let the user's serializer serialize arguments - val map = mutableMapOf() - params.inject(map) - val queryBytes = actualSerializer.serialize(map, typeRef()) - Mapper.decodeIntoTree(queryBytes) as ObjectNode - } - - override fun pipelineBatch(): Int? = pipelineBatch - override fun pipelineCap(): Int? = pipelineCap - - override fun positionalParameters(): ArrayNode? = (parameters as? QueryParameters.Positional)?.let { params -> - // Let the user's serializer serialize arguments - val map = mutableMapOf() - params.inject(map) - val queryBytes = actualSerializer.serialize(map, typeRef()) - Mapper.decodeIntoTree(queryBytes).get("args") as? ArrayNode - } - - override fun profile(): CoreQueryProfile = profile.core - - override fun raw(): JsonNode? { - if (raw.isEmpty()) return null - val rawBytes = actualSerializer.serialize(raw, typeRef()) - return Mapper.decodeIntoTree(rawBytes) - } - - override fun readonly(): Boolean = readonly - override fun scanWait(): java.time.Duration? = consistency.scanWait?.toJavaDuration() - override fun scanCap(): Int? = scanCap - - override fun scanConsistency(): CoreQueryScanConsistency? = when (consistency) { - is QueryScanConsistency.NotBounded -> CoreQueryScanConsistency.NOT_BOUNDED - is QueryScanConsistency.RequestPlus -> CoreQueryScanConsistency.REQUEST_PLUS - else -> null // ConsistentWith is handled by separate consistentWith() accessor - } - - override fun flexIndex(): Boolean = flexIndex - override fun preserveExpiry(): Boolean? = if (preserveExpiry) true else null - override fun asTransactionOptions(): CoreSingleQueryTransactionOptions? 
= null - override fun commonOptions(): CoreCommonOptions = common.toCore() - override fun useReplica(): Boolean? = useReplica - } + val coreQueryOpts = CoreQueryOptions( + common = common, + parameters = parameters, + preserveExpiry = preserveExpiry, + actualSerializer = actualSerializer, + consistency = consistency, + readonly = readonly, + adhoc = adhoc, + flexIndex = flexIndex, + metrics = metrics, + profile = profile, + maxParallelism = maxParallelism, + scanCap = scanCap, + pipelineBatch = pipelineBatch, + pipelineCap = pipelineCap, + clientContextId = clientContextId, + raw = raw, + useReplica = useReplica, + ) return flow { val response = queryOps.queryReactive( @@ -147,3 +116,79 @@ internal class QueryExecutor( } } } + +internal fun CoreQueryOptions( + common: CommonOptions, + parameters: QueryParameters, + preserveExpiry: Boolean, + + actualSerializer: JsonSerializer, + + consistency: QueryScanConsistency, + readonly: Boolean, + adhoc: Boolean, + flexIndex: Boolean, + + metrics: Boolean, + profile: QueryProfile, + + maxParallelism: Int?, + scanCap: Int?, + pipelineBatch: Int?, + pipelineCap: Int?, + + clientContextId: String?, + raw: Map, + useReplica: Boolean?, +): CoreQueryOptions { + return object : CoreQueryOptions { + override fun adhoc(): Boolean = adhoc + override fun clientContextId(): String? = clientContextId + override fun consistentWith(): CoreMutationState? = (consistency as? QueryScanConsistency.ConsistentWith)?.toCore() + override fun maxParallelism(): Int? = maxParallelism + override fun metrics(): Boolean = metrics + + override fun namedParameters(): ObjectNode? = (parameters as? QueryParameters.Named)?.let { params -> + // Let the user's serializer serialize arguments + val map = mutableMapOf() + params.inject(map) + val queryBytes = actualSerializer.serialize(map, typeRef()) + Mapper.decodeIntoTree(queryBytes) as ObjectNode + } + + override fun pipelineBatch(): Int? = pipelineBatch + override fun pipelineCap(): Int? = pipelineCap + + override fun positionalParameters(): ArrayNode? = (parameters as? QueryParameters.Positional)?.let { params -> + // Let the user's serializer serialize arguments + val map = mutableMapOf() + params.inject(map) + val queryBytes = actualSerializer.serialize(map, typeRef()) + Mapper.decodeIntoTree(queryBytes).get("args") as? ArrayNode + } + + override fun profile(): CoreQueryProfile = profile.core + + override fun raw(): JsonNode? { + if (raw.isEmpty()) return null + val rawBytes = actualSerializer.serialize(raw, typeRef()) + return Mapper.decodeIntoTree(rawBytes) + } + + override fun readonly(): Boolean = readonly + override fun scanWait(): java.time.Duration? = consistency.scanWait?.toJavaDuration() + override fun scanCap(): Int? = scanCap + + override fun scanConsistency(): CoreQueryScanConsistency? = when (consistency) { + is QueryScanConsistency.NotBounded -> CoreQueryScanConsistency.NOT_BOUNDED + is QueryScanConsistency.RequestPlus -> CoreQueryScanConsistency.REQUEST_PLUS + else -> null // ConsistentWith is handled by separate consistentWith() accessor + } + + override fun flexIndex(): Boolean = flexIndex + override fun preserveExpiry(): Boolean? = if (preserveExpiry) true else null + override fun asTransactionOptions(): CoreSingleQueryTransactionOptions? = null + override fun commonOptions(): CoreCommonOptions = common.toCore() + override fun useReplica(): Boolean? 
= useReplica + } +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/ClusterSamples.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/ClusterSamples.kt index 01deeea83..9c9feabd8 100644 --- a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/ClusterSamples.kt +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/ClusterSamples.kt @@ -23,10 +23,11 @@ import com.couchbase.client.kotlin.Cluster import com.couchbase.client.kotlin.codec.RawJsonTranscoder import com.couchbase.client.kotlin.env.ClusterEnvironment import com.couchbase.client.kotlin.env.dsl.TrustSource +import com.couchbase.client.kotlin.kv.Durability import com.couchbase.client.kotlin.query.execute import kotlinx.coroutines.runBlocking import java.nio.file.Paths -import java.util.* +import java.util.Optional import kotlin.time.Duration.Companion.seconds @Suppress("UNUSED_VARIABLE") @@ -140,6 +141,14 @@ internal fun configureManyThingsUsingDsl() { connectTimeout = 15.seconds } + transactions { + durabilityLevel = Durability.majorityAndPersistToActive() + + cleanup { + cleanupWindow = 30.seconds + } + } + orphanReporter { emitInterval = 20.seconds } diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/TransactionSamples.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/TransactionSamples.kt new file mode 100644 index 000000000..ca5a80fd9 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/samples/TransactionSamples.kt @@ -0,0 +1,62 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.samples + +import com.couchbase.client.kotlin.Cluster +import com.couchbase.client.kotlin.Collection +import com.couchbase.client.kotlin.transactions.TransactionGetResult +import kotlin.random.Random + + +internal suspend fun simpleTransactionExample( + cluster: Cluster, + collection: Collection, + sourceDocId: String, + destDocId: String, + value: Int +) { + // Assume two documents both contain integers. + // Subtract a value from the one document and add it to the other. + cluster.transactions.run { + // The SDK may execute this lambda multiple times + // if there are conflicts between transactions. + // All logic related to the transaction must happen + // inside this lambda. + + // Inside the lambda, `this` is a `TransactionAttemptContext` + // with instance methods like `get`, `replace`, `insert`, `remove`, + // and `query` for interacting with documents in a transactional way. + // These are the only methods you should use to interact with documents + // inside the transaction lambda. 
+ + val source: TransactionGetResult = get(collection, sourceDocId) + val dest: TransactionGetResult = get(collection, destDocId) + + val newSourceValue: Int = source.contentAs() - value + val newDestValue: Int = dest.contentAs() + value + + replace(source, newSourceValue) + + // Throwing any exception triggers a rollback and causes + // `transactions.run` to throw TransactionFailedException. + if (Random.nextBoolean()) throw RuntimeException("simulated error") + require(newSourceValue >= 0) { "transfer would result in negative source value" } + require(newDestValue >= 0) { "transfer would result in dest value overflow" } + + replace(dest, newDestValue) + } +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionAttemptContext.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionAttemptContext.kt new file mode 100644 index 000000000..c28fb76e5 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionAttemptContext.kt @@ -0,0 +1,300 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.api.query.CoreQueryResult +import com.couchbase.client.core.cnc.CbTracing +import com.couchbase.client.core.cnc.RequestSpan +import com.couchbase.client.core.cnc.TracingIdentifiers +import com.couchbase.client.core.error.CasMismatchException +import com.couchbase.client.core.error.DocumentExistsException +import com.couchbase.client.core.error.DocumentNotFoundException +import com.couchbase.client.core.msg.kv.CodecFlags +import com.couchbase.client.core.transaction.CoreTransactionAttemptContext +import com.couchbase.client.core.transaction.support.SpanWrapper +import com.couchbase.client.kotlin.Collection +import com.couchbase.client.kotlin.CommonOptions +import com.couchbase.client.kotlin.Scope +import com.couchbase.client.kotlin.codec.Content +import com.couchbase.client.kotlin.codec.JsonSerializer +import com.couchbase.client.kotlin.codec.TypeRef +import com.couchbase.client.kotlin.codec.typeRef +import com.couchbase.client.kotlin.internal.await +import com.couchbase.client.kotlin.query.QueryMetadata +import com.couchbase.client.kotlin.query.QueryParameters +import com.couchbase.client.kotlin.query.QueryProfile +import com.couchbase.client.kotlin.query.QueryResult +import com.couchbase.client.kotlin.query.QueryRow +import com.couchbase.client.kotlin.query.QueryScanConsistency +import com.couchbase.client.kotlin.query.internal.CoreQueryOptions +import kotlinx.coroutines.reactive.awaitSingle +import java.util.UUID +import java.util.stream.Collectors + +public class TransactionAttemptContext internal constructor( + private val internal: CoreTransactionAttemptContext, + private val defaultJsonSerializer: JsonSerializer, +) { + /** + * Gets a document from the specified Couchbase [collection] matching the 
specified [id]. + * + * @param collection the Couchbase collection containing the document + * @param id the ID of the document to get + * @return a [TransactionGetResult] containing the document + * @throws DocumentNotFoundException if the document does not exist + */ + public suspend fun get(collection: Collection, id: String): TransactionGetResult { + val core = internal.get(collection.collectionId, id).awaitSingle() + return TransactionGetResult(core, defaultJsonSerializer) + } + + /** + * Mutates the specified [doc] with new content. + * + * The mutation is staged until the transaction is committed. + * That is, any read of the document by any Couchbase component + * will see the document's current value, rather than this staged or 'dirty' data. + * If the attempt is rolled back, the staged mutation will be removed. + * + * This staged data effectively locks the document from other transactional writes + * until the attempt completes (commits or rolls back). + * + * If the mutation fails with a [CasMismatchException] or any other exception, + * the transaction will automatically roll back this attempt, then retry. + * + * @param doc identifies the document to update + * @param content the new content for the document. + * @return the document, updated with its new CAS value. + */ + public suspend inline fun replace( + doc: TransactionGetResult, + content: T, + jsonSerializer: JsonSerializer? = null, + ): TransactionGetResult { + return replaceInternal(doc, content, typeRef(), jsonSerializer) + } + + @PublishedApi + internal suspend fun replaceInternal( + doc: TransactionGetResult, + content: T, + type: TypeRef, + jsonSerializer: JsonSerializer?, + ): TransactionGetResult { + val span: RequestSpan = CbTracing.newSpan(internal.core().context(), TracingIdentifiers.TRANSACTION_OP_REPLACE, internal.span()) + + val encoded = serialize(content, type, jsonSerializer) + val core = internal.replace(doc.internal, encoded.bytes, encoded.flags, SpanWrapper(span)).awaitSingle() + return TransactionGetResult(core, defaultJsonSerializer) + } + + /** + * Inserts a new document into the specified Couchbase [collection]. + * + * As with [replace], the insert is staged until the transaction is committed. + * Due to technical limitations, it is not as possible to completely hide the staged data + * from the rest of the Couchbase platform, as an empty document must be created. + * + * This staged data effectively locks the document from other transactional writes + * until the attempt completes (commits or rolls back). + * + * @param collection the Couchbase collection in which to insert the doc + * @param id the document's unique ID + * @param content the content to insert. + * @return the document, updated with its new CAS value and ID, and converted to a [TransactionGetResult] + * @throws DocumentExistsException if the collection already contains a document with the given ID. + */ + public suspend inline fun insert( + collection: Collection, + id: String, + content: T, + jsonSerializer: JsonSerializer? 
= null, + ): TransactionGetResult { + return insertInternal(collection, id, content, typeRef(), jsonSerializer) + } + + @PublishedApi + internal suspend fun insertInternal( + collection: Collection, + id: String, + content: T, + type: TypeRef, + jsonSerializer: JsonSerializer?, + ): TransactionGetResult { + val span: RequestSpan = CbTracing.newSpan(internal.core().context(), TracingIdentifiers.TRANSACTION_OP_INSERT, internal.span()) + + val encoded = serialize(content, type, jsonSerializer) + val core = internal.insert(collection.collectionId, id, encoded.bytes, encoded.flags, SpanWrapper(span)).awaitSingle() + return TransactionGetResult(core, defaultJsonSerializer) + } + + /** + * Removes the specified [doc]. + * + * As with [replace], the remove is staged until the transaction is committed. + * That is, the document will continue to exist, and the rest of the Couchbase platform will continue to see it. + * + * This staged data effectively locks the document from other transactional writes + * until the attempt completes (commits or rolls back). + * + * Note that an overload that takes the document ID as a string is not possible, as it's necessary to check a provided + * [TransactionGetResult] to determine if the document is involved in another transaction. + * + * @param doc the document to remove + */ + public suspend fun remove(doc: TransactionGetResult) { + val span: RequestSpan = CbTracing.newSpan(internal.core().context(), TracingIdentifiers.TRANSACTION_OP_REMOVE, internal.span()) + + internal.remove(doc.internal, SpanWrapper(span)).await() + } + + /** + * Executes a SQL++ query, buffers all result rows in memory, and returns them as a [QueryResult]. + * + * @param statement the SQL++ statement to execute. + * + * @param parameters parameters to the SQL++ statement. + * + * @param scope the query context, or null to execute a cluster-level query. + * + * @param serializer the serializer to use for converting parameters to JSON, + * and the default serializer for parsing [QueryRow] content. + * Defaults to the serializer configured on the cluster environment. + * + * @param consistency the scan consistency level for this query. + * Only [QueryScanConsistency.requestPlus] and [QueryScanConsistency.notBounded] are + * supported in transactions. Note that the default scan consistency for transactional queries is + * [QueryScanConsistency.requestPlus]; this differs from non-transaction queries, + * which default to [QueryScanConsistency.notBounded]. + * + * @param readonly pass true if the SQL++ statement does not modify documents. + * This enables certain optimizations, and ensures a query fails instead of accidentally modifying data. + * + * @param adhoc pass false if this is a commonly used query that should be + * turned into a prepared statement for faster execution. + * + * @param flexIndex pass true to use a full-text index instead of a query index. + * + * @param profile specifies how much profiling information to include in + * the response (access via [QueryMetadata.profile]). Profiling is + * relatively expensive, and can impact the performance of the server + * query engine. Not recommended for use in production, unless you're + * diagnosing a specific issue. Note this is an Enterprise Edition feature. + * On Community Edition the parameter will be accepted, but no profiling + * information returned. + * + * @param scanCap Maximum buffered channel size between the indexer client + * and the query service for index scans. This parameter controls when to use + * scan backfill. 
Use 0 or a negative number to disable. Smaller values + * reduce GC, while larger values reduce indexer backfill. + * + * @param pipelineBatch Controls the number of items execution operators + * can batch for Fetch from the KV. + * + * @param pipelineCap Maximum number of items each execution operator + * can buffer between various operators. + * + * @param clientContextId an arbitrary string that identifies this query + * for diagnostic purposes. + * + * @param raw an "escape hatch" for passing arbitrary query options that + * aren't otherwise exposed by this method. + */ + public suspend fun query( + statement: String, + parameters: QueryParameters = QueryParameters.None, + scope: Scope? = null, + serializer: JsonSerializer? = null, + + consistency: QueryScanConsistency = QueryScanConsistency.requestPlus(), + readonly: Boolean = false, + adhoc: Boolean = true, + flexIndex: Boolean = false, + + profile: QueryProfile = QueryProfile.OFF, + + scanCap: Int? = null, + pipelineBatch: Int? = null, + pipelineCap: Int? = null, + + clientContextId: String? = UUID.randomUUID().toString(), + raw: Map = emptyMap(), + ): QueryResult { + require(consistency !is QueryScanConsistency.ConsistentWith) { + "Query in transaction does not support `QueryScanConsistency.ConsistentWith`." + } + + val actualSerializer = serializer ?: defaultJsonSerializer + + val common = CommonOptions.Default + val maxParallelism: Int? = null + val metrics = true + val preserveExpiry = false + val useReplica: Boolean? = null + + val coreQueryOpts = CoreQueryOptions( + common = common, + parameters = parameters, + preserveExpiry = preserveExpiry, + actualSerializer = actualSerializer, + consistency = consistency, + readonly = readonly, + adhoc = adhoc, + flexIndex = flexIndex, + metrics = metrics, + profile = profile, + maxParallelism = maxParallelism, + scanCap = scanCap, + pipelineBatch = pipelineBatch, + pipelineCap = pipelineCap, + clientContextId = clientContextId, + raw = raw, + useReplica = useReplica, + ) + + val coreQueryResult: CoreQueryResult = internal.queryBlocking( + statement, + scope?.queryContext, + coreQueryOpts, + false, + ).awaitSingle() + + val rows = coreQueryResult.rows() + .map { QueryRow(it.data(), actualSerializer) } + .collect(Collectors.toList()) + + val metadata = QueryMetadata(coreQueryResult.metaData()) + + return QueryResult(rows, metadata) + } + + private fun serialize( + content: T, + type: TypeRef, + jsonSerializer: JsonSerializer?, + ): Content { + if (content is Content) { + require(content.flags == CodecFlags.JSON_COMPAT_FLAGS || content.flags == CodecFlags.BINARY_COMPAT_FLAGS) { + "Content in transaction must be flagged as JSON or BINARY, but got ${content.flags}" + } + return content + } + val jsonBytes = (jsonSerializer ?: defaultJsonSerializer).serialize(content, type) + return Content.json(jsonBytes) + } + +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionCommitAmbiguousException.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionCommitAmbiguousException.kt new file mode 100644 index 000000000..0a3328367 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionCommitAmbiguousException.kt @@ -0,0 +1,38 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
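[Editor's note] None of the new samples exercise the transactional `query` method defined above, so the following is a hedged sketch of one possible call. The statement and bucket name are hypothetical, `QueryResult.rows` and `QueryRow.contentAs` are assumed from the existing Kotlin SDK query API, and `cluster.transactions.run` (added later in this patch) must be invoked from a coroutine, possibly with a volatile-API opt-in.

import com.couchbase.client.kotlin.Cluster

suspend fun countHotels(cluster: Cluster): Long {
    val result = cluster.transactions.run {
        // readonly = true: the statement does not modify documents.
        // Transactional queries default to request_plus scan consistency.
        query("SELECT RAW COUNT(*) FROM `travel-sample` WHERE type = 'hotel'", readonly = true)
    }
    // result.value is whatever the lambda returned, here the buffered QueryResult.
    return result.value.rows.single().contentAs<Long>()
}
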
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.error.transaction.internal.CoreTransactionCommitAmbiguousException + +/** + * The transaction expired at the point of trying to commit it. + * It is ambiguous whether the transaction has committed or not. + * Actors may be able to see the content of this transaction. + * + * This error is result of inevitable and unavoidable edge cases when working with unreliable networks. + * For example, consider an ordinary mutation being made over the network to any database. + * The mutation could succeed on the database-side, and then just before the result is returned to the client, the network connection drops. + * The client cannot receive the success result and will time out - it is ambiguous to it whether the mutation succeeded or not. + * + * The transactions layer will work to resolve the ambiguity up until the transaction expires, but if unable to resolve + * it in that time, it is forced to raise this error. + * The transaction may or may not have been successful, and error-handling of this is highly application-dependent. + * + * An asynchronous cleanup process will try to complete the transaction: roll it back if it didn't commit, roll it + * forwards if it did. + */ +public class TransactionCommitAmbiguousException(e: CoreTransactionCommitAmbiguousException) : TransactionFailedException(e) diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionExpiredException.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionExpiredException.kt new file mode 100644 index 000000000..ea3d92950 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionExpiredException.kt @@ -0,0 +1,27 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.error.transaction.internal.CoreTransactionExpiredException + +/** + * The transaction could not be fully completed in the configured timeout. + * + * It is in an undefined state, but it unambiguously did not reach the commit point. + * No actors will be able to see the contents of this transaction. 
+ */ +public class TransactionExpiredException(e: CoreTransactionExpiredException) : TransactionFailedException(e) diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionFailedException.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionFailedException.kt new file mode 100644 index 000000000..e521af8ad --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionFailedException.kt @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.cnc.events.transaction.TransactionLogEvent +import com.couchbase.client.core.error.CouchbaseException +import com.couchbase.client.core.error.transaction.internal.CoreTransactionCommitAmbiguousException +import com.couchbase.client.core.error.transaction.internal.CoreTransactionExpiredException +import com.couchbase.client.core.error.transaction.internal.CoreTransactionFailedException + +/** + * The transaction failed to reach the Committed point. + * + * No actors can see any changes made by this transaction. + */ +public open class TransactionFailedException( + private val wrapped: CoreTransactionFailedException, +) : CouchbaseException(wrapped.message, wrapped.cause) { + + /** + * Returns the in-memory log built up during each transaction. + * The application may want to write this to their own logs, for example upon transaction failure. + */ + public val logs: List + get() = wrapped.logger().logs() + + public val transactionId: String + get() = wrapped.transactionId() + + internal companion object { + internal fun convertTransactionFailedInternal(err: Throwable) : Throwable { + return when (err) { + is CoreTransactionCommitAmbiguousException -> TransactionCommitAmbiguousException(err) + is CoreTransactionExpiredException -> TransactionExpiredException(err) + is CoreTransactionFailedException -> TransactionFailedException(err) + else -> err + } + } + } +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionGetResult.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionGetResult.kt new file mode 100644 index 000000000..f2fcabc5c --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionGetResult.kt @@ -0,0 +1,62 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.transaction.CoreTransactionGetResult +import com.couchbase.client.kotlin.Keyspace +import com.couchbase.client.kotlin.codec.Content +import com.couchbase.client.kotlin.codec.JsonSerializer +import com.couchbase.client.kotlin.codec.typeRef + +/** + * A document inside a transaction. + */ +public class TransactionGetResult internal constructor( + internal val internal: CoreTransactionGetResult, + @property:PublishedApi internal val defaultJsonSerializer: JsonSerializer, +) { + /** + * The document's ID. + */ + public val id: String = internal.id() + + /** + * The fully-qualified name of the collection containing the document. + */ + public val keyspace: Keyspace = internal.collection().toKeyspace() + + /** + * The retrieved content. Useful for accessing the raw bytes + * of the document. + */ + public val content: Content = + if (internal.isBinary) Content.binary(internal.contentAsBytes()) + else Content.json(internal.contentAsBytes()) + + /** + * Returns the document content after deserializing it into the type + * specified by the type parameter. + * + * @param jsonSerializer the serializer to use, or null to use the serializer + * configured on the cluster environment. + */ + public inline fun contentAs(jsonSerializer: JsonSerializer? = null): T { + return (jsonSerializer ?: defaultJsonSerializer).deserialize(content.bytes, typeRef()) + } + + override fun toString(): String = internal.toString() +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionResult.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionResult.kt new file mode 100644 index 000000000..0c7c8c080 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/TransactionResult.kt @@ -0,0 +1,71 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.cnc.events.transaction.TransactionLogEvent +import com.couchbase.client.core.transaction.CoreTransactionResult +import kotlin.time.Duration +import kotlin.time.toKotlinDuration + +/** + * Holds the value returned by the transaction lambda, as well as + * debugging and logging facilities for tracking what happened during the transaction. 
+ * + * Note that the success or failure of a transaction is determined solely by whether it threw a + * [TransactionFailedException]. + * + * @param V The type of value returned by the transaction lambda + * @property value The value returned by the transaction lambda + */ +public class TransactionResult internal constructor( + public val value: V, + internal val internal: CoreTransactionResult, +) { + /** + * The in-memory log built up during each transaction. The application may want to write this to their own logs, + * for example upon transaction failure. + */ + public val logs: List + get() = internal.log().logs() + + /** + * Total time taken by a transaction. + */ + public val timeTaken: Duration + get() = internal.timeTaken().toKotlinDuration() + + /** + * The ID of this transaction. + */ + public val transactionId: String + get() = internal.transactionId() + + /** + * Indicates whether all documents were successfully unstaged (committed). + * + * This will only be true if the transaction reached the COMMIT point and then went on to reach + * the COMPLETE point. + * + * It will be false for transactions that: + * - Rolled back + * - Were read-only + */ + public val unstagingComplete: Boolean + get() = internal.unstagingComplete() + + override fun toString(): String = "TransactionResult(value=$value, internal=$internal)" +} diff --git a/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/Transactions.kt b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/Transactions.kt new file mode 100644 index 000000000..73c165260 --- /dev/null +++ b/kotlin-client/src/main/kotlin/com/couchbase/client/kotlin/transactions/Transactions.kt @@ -0,0 +1,144 @@ +/* + * Copyright 2024 Couchbase, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
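[Editor's note] Together with `TransactionResult`, the exception types above make up the failure model applications see. A minimal sketch of handling them around `Transactions.run` (added just below); the document ID and the logging are illustrative only.

import com.couchbase.client.kotlin.Cluster
import com.couchbase.client.kotlin.Collection
import com.couchbase.client.kotlin.transactions.TransactionCommitAmbiguousException
import com.couchbase.client.kotlin.transactions.TransactionFailedException

suspend fun incrementCounter(cluster: Cluster, collection: Collection) {
    try {
        val result = cluster.transactions.run {
            val doc = get(collection, "counter::visits")
            replace(doc, doc.contentAs<Int>() + 1)
        }
        println("Committed ${result.transactionId} in ${result.timeTaken}; unstaged: ${result.unstagingComplete}")
    } catch (e: TransactionCommitAmbiguousException) {
        // The commit may or may not have taken effect; the cleanup process will resolve it.
        e.logs.forEach(::println)
    } catch (e: TransactionFailedException) {
        // No changes from this transaction are visible to other actors.
        e.logs.forEach(::println)
    }
}
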
+ */ + +package com.couchbase.client.kotlin.transactions + +import com.couchbase.client.core.Core +import com.couchbase.client.core.cnc.RequestSpan +import com.couchbase.client.core.error.transaction.internal.CoreTransactionFailedException +import com.couchbase.client.core.io.CollectionIdentifier +import com.couchbase.client.core.io.CollectionIdentifier.DEFAULT_COLLECTION +import com.couchbase.client.core.io.CollectionIdentifier.DEFAULT_SCOPE +import com.couchbase.client.core.transaction.CoreTransactionAttemptContext +import com.couchbase.client.core.transaction.CoreTransactionsReactive +import com.couchbase.client.core.transaction.config.CoreTransactionOptions +import com.couchbase.client.core.transaction.support.TransactionAttemptContextFactory +import com.couchbase.client.kotlin.Keyspace +import com.couchbase.client.kotlin.env.dsl.checkTransactionDurability +import com.couchbase.client.kotlin.env.env +import com.couchbase.client.kotlin.internal.toOptional +import com.couchbase.client.kotlin.kv.Durability +import com.couchbase.client.kotlin.manager.bucket.levelIfSynchronous +import kotlinx.coroutines.reactive.awaitSingle +import kotlinx.coroutines.reactor.mono +import java.util.Optional +import kotlin.time.Duration +import kotlin.time.toJavaDuration + +/** + * The starting point for accessing Couchbase transactions. + * + * @see run + */ +public class Transactions internal constructor(internal val core: Core) { + private val internal = CoreTransactionsReactive(core, core.env.transactionsConfig()) + + /** + * Runs supplied transactional logic until success or failure. + * + * The supplied transactional logic will be run if necessary multiple times, until either: + * + * - The transaction successfully commits + * - The transactional logic requests an explicit rollback + * - The transaction times out. + * - An exception is thrown, either inside the transaction library or by the supplied transaction logic, + * that cannot be handled. + * + * The transaction logic {@link Consumer} is provided an {@link TransactionAttemptContext}, which contains methods allowing it + * to read, mutate, insert and delete documents, as well as commit or rollback the transaction. + * + * If the transaction logic performs a commit or rollback it must be the last operation performed. + * Else a [TransactionFailedException] will be thrown. + * Similarly, there cannot be a commit followed by a rollback, or vice versa - this will also raise a [TransactionFailedException]. + * + * If the transaction logic does not perform an explicit commit or rollback, then a commit will be performed + * anyway. + * + * @param durability Durability level for this transaction, or null to use the cluster environment's default transaction durability. + * Must not be [Durability.none] or [Durability.clientVerified], which are not compatible with transactions. + * + * @param timeout Time allowed for this transaction to complete, or null to use the cluster environment's default timeout duration. + * @param metadataCollection The location in Couchbase to store metadata this transaction, or null to use the cluster environment's default metadata collection. + * + * @return The value returned by the transaction logic, along with diagnostic information. + * + * @throws TransactionFailedException or a derived exception if the transaction fails to commit for any reason, possibly + * after multiple retries. 
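[Editor's note] The environment defaults can also be overridden for a single transaction through the `run` parameters documented above. A minimal sketch, with hypothetical values:

import com.couchbase.client.kotlin.Cluster
import com.couchbase.client.kotlin.Collection
import com.couchbase.client.kotlin.kv.Durability
import kotlin.time.Duration.Companion.seconds

suspend fun removeStaleDoc(cluster: Cluster, collection: Collection) {
    cluster.transactions.run(
        timeout = 5.seconds,                // overrides the environment default for this transaction only
        durability = Durability.majority(), // must not be none() or clientVerified()
    ) {
        remove(get(collection, "stale::doc"))
    }
}
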
The exception contains further details of the error + * + * @sample com.couchbase.client.kotlin.samples.simpleTransactionExample + */ + public suspend fun run( + timeout: Duration? = null, + parentSpan: RequestSpan? = null, + durability: Durability? = null, + metadataCollection: Keyspace? = null, + transactionLogic: suspend TransactionAttemptContext.() -> V, + ): TransactionResult { + return runInternal( + timeout, parentSpan, durability, metadataCollection, null, transactionLogic, + ) + } + + internal suspend fun runInternal( + timeout: Duration? = null, + parentSpan: RequestSpan? = null, + durability: Durability? = null, + metadataCollection: Keyspace? = null, + attemptContextFactory: TransactionAttemptContextFactory?, + transactionLogic: suspend TransactionAttemptContext.() -> V, + ): TransactionResult { + + checkTransactionDurability(durability) + + val perConfig = CoreTransactionOptions( + durability?.levelIfSynchronous() ?: Optional.empty(), + Optional.empty(), // scan consistency + parentSpan.toOptional(), + timeout?.toJavaDuration().toOptional(), + metadataCollection?.toCollectionIdentifier().toOptional(), + attemptContextFactory.toOptional(), + ) + + var value: V? = null + val function = { ctx: CoreTransactionAttemptContext -> + mono { + value = transactionLogic(TransactionAttemptContext(ctx, core.env.jsonSerializer)) + } + } + + try { + val coreResult = internal.run(function, perConfig).awaitSingle() + @Suppress("UNCHECKED_CAST") + return TransactionResult(value as V, coreResult) + } catch (t: CoreTransactionFailedException) { + throw TransactionFailedException.convertTransactionFailedInternal(t) + } + } +} + +internal fun Keyspace.toCollectionIdentifier() = + CollectionIdentifier( + bucket, + Optional.of(scope), + Optional.of(collection) + ) + +internal fun CollectionIdentifier.toKeyspace() = + Keyspace( + bucket(), + scope().orElse(DEFAULT_SCOPE), + collection().orElse(DEFAULT_COLLECTION) + ) diff --git a/kotlin-fit-performer/pom.xml b/kotlin-fit-performer/pom.xml index 833b1a7f7..e00de55e7 100644 --- a/kotlin-fit-performer/pom.xml +++ b/kotlin-fit-performer/pom.xml @@ -7,7 +7,7 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 fit-performer-kotlin diff --git a/metrics-micrometer/pom.xml b/metrics-micrometer/pom.xml index 2ca77a88a..e8bedb5b3 100644 --- a/metrics-micrometer/pom.xml +++ b/metrics-micrometer/pom.xml @@ -8,11 +8,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 metrics-micrometer - 0.7.3 + 0.7.6 Micrometer Metrics Interoperability Provides interoperability with Micrometer diff --git a/metrics-micrometer/src/integrationTest/resources/integration.properties b/metrics-micrometer/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/metrics-micrometer/src/integrationTest/resources/integration.properties +++ b/metrics-micrometer/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/metrics-opentelemetry/pom.xml b/metrics-opentelemetry/pom.xml index 05ed82437..2b1245c29 100644 --- a/metrics-opentelemetry/pom.xml +++ b/metrics-opentelemetry/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 metrics-opentelemetry - 0.7.3 + 0.7.6 OpenTelemetry Metrics Interoperability Provides interoperability with 
OpenTelemetry Metrics diff --git a/metrics-opentelemetry/src/integrationTest/resources/integration.properties b/metrics-opentelemetry/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/metrics-opentelemetry/src/integrationTest/resources/integration.properties +++ b/metrics-opentelemetry/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/osgi-feature/pom.xml b/osgi-feature/pom.xml index 3e96ee937..b45a82d44 100644 --- a/osgi-feature/pom.xml +++ b/osgi-feature/pom.xml @@ -7,11 +7,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 osgi-feature - 3.7.3 + 3.7.6 pom Couchbase Java SDK OSGI Feature diff --git a/pom.xml b/pom.xml index 0c97d5771..10ee17a80 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ 4.0.0 com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 pom Couchbase JVM Client Parent @@ -15,26 +15,26 @@ UTF-8 UTF-8 - 3.7.3 - 1.7.3 - 3.7.3 - 3.7.3 - 1.7.3 - 1.7.3 - 1.4.3 - 0.1.0 - 1.5.3 - 0.7.3 - 1.7.3 + 3.7.6 + 1.7.6 + 3.7.6 + 3.7.6 + 1.7.6 + 1.7.6 + 1.4.6 + 1.0.6 + 1.5.6 + 0.7.6 + 1.7.6 - 5.9.1 + 5.11.2 3.23.1 2.20.0 2.17.2 3.6.9 1.0.4 - 1.0.9.RELEASE + 1.0.10.RELEASE 1.12.9 1.7.36 4.4.3 @@ -42,8 +42,9 @@ 1.2.11 2.9.1 + 0.8.0 - 0.14.3 + 0.14.10 2.9.4 @@ -51,9 +52,10 @@ 0.9.23 3.6.12 - 1.5.1 - 3.2.0 - 2.11.0 + 1.6.0 + + 3.3.1 + 2.12.0 3.2.4 3.2.1 diff --git a/protostellar/pom.xml b/protostellar/pom.xml index 8be5c95fd..7ff035665 100644 --- a/protostellar/pom.xml +++ b/protostellar/pom.xml @@ -15,9 +15,10 @@ 9999.0-SNAPSHOT - + 1.66.0 - 3.23.2 + 3.25.5 8 8 UTF-8 diff --git a/scala-client/pom.xml b/scala-client/pom.xml index ceaabf2f1..ab282be34 100644 --- a/scala-client/pom.xml +++ b/scala-client/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 scala-client_${scala.compat.version} - 1.7.3 + 1.7.6 jar Couchbase Scala SDK diff --git a/scala-client/src/integrationTest/resources/integration.properties b/scala-client/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/scala-client/src/integrationTest/resources/integration.properties +++ b/scala-client/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/scala-client/src/integrationTest/scala/com/couchbase/client/scala/KeyValueExpirySpec.scala b/scala-client/src/integrationTest/scala/com/couchbase/client/scala/KeyValueExpirySpec.scala index a0e81a8f6..4c93776d6 100644 --- a/scala-client/src/integrationTest/scala/com/couchbase/client/scala/KeyValueExpirySpec.scala +++ b/scala-client/src/integrationTest/scala/com/couchbase/client/scala/KeyValueExpirySpec.scala @@ -75,33 +75,40 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { val expiryDuration = 3.second val nearFuture = Instant.now.plus(expiryDuration.toSeconds, SECONDS) - val insertWithInstant = DocAndOperation("insertWithInstant", + val insertWithInstant = DocAndOperation( + "insertWithInstant", (docId) => assert(coll.insert(docId, content, InsertOptions().expiry(nearFuture)).isSuccess), 
upsertDocFirst = false ) - val insertWithDuration = DocAndOperation("insertWithDuration", + val insertWithDuration = DocAndOperation( + "insertWithDuration", (docId) => assert(coll.insert(docId, content, InsertOptions().expiry(expiryDuration)).isSuccess), upsertDocFirst = false ) - val replaceWithInstant = DocAndOperation("replaceWithInstant", + val replaceWithInstant = DocAndOperation( + "replaceWithInstant", (docId) => assert(coll.replace(docId, content, ReplaceOptions().expiry(nearFuture)).isSuccess) ) - val replaceWithDuration = DocAndOperation("replaceWithDuration", + val replaceWithDuration = DocAndOperation( + "replaceWithDuration", (docId) => assert(coll.replace(docId, content, ReplaceOptions().expiry(expiryDuration)).isSuccess) ) - val upsertWithInstant = DocAndOperation("upsertWithInstant", + val upsertWithInstant = DocAndOperation( + "upsertWithInstant", (docId) => assert(coll.upsert(docId, content, UpsertOptions().expiry(nearFuture)).isSuccess) ) - val upsertWithDuration = DocAndOperation("upsertWithDuration", + val upsertWithDuration = DocAndOperation( + "upsertWithDuration", (docId) => assert(coll.upsert(docId, content, UpsertOptions().expiry(expiryDuration)).isSuccess) ) - val mutateInWithInstant = DocAndOperation("mutateInWithInstant", + val mutateInWithInstant = DocAndOperation( + "mutateInWithInstant", (docId) => assert( coll @@ -113,7 +120,8 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { .isSuccess ) ) - val mutateInWithDuration = DocAndOperation("mutateInWithDuration", + val mutateInWithDuration = DocAndOperation( + "mutateInWithDuration", (docId) => assert( coll @@ -126,7 +134,8 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { ) ) - val incrementWithInstant = DocAndOperation("incrementWithInstant", + val incrementWithInstant = DocAndOperation( + "incrementWithInstant", (docId) => assert( coll.binary @@ -135,7 +144,8 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { ), upsertDocFirst = false ) - val incrementWithDuration = DocAndOperation("incrementWithDuration", + val incrementWithDuration = DocAndOperation( + "incrementWithDuration", (docId) => assert( coll.binary @@ -145,7 +155,8 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { upsertDocFirst = false ) - val decrementWithInstant = DocAndOperation("decrementWithInstant", + val decrementWithInstant = DocAndOperation( + "decrementWithInstant", (docId) => assert( coll.binary @@ -154,7 +165,8 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { ), upsertDocFirst = false ) - val decrementWithDuration = DocAndOperation("decrementWithDuration", + val decrementWithDuration = DocAndOperation( + "decrementWithDuration", (docId) => assert( coll.binary @@ -200,9 +212,9 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { // Immediately after, the doc should exist coll.get(operation.docId, GetOptions().withExpiry(true)) match { case Success(result) => - ScalaIntegrationTest.Logger.info(s"${operation.name}: fetched ${result}") - assert(result.expiry.isDefined) - case Failure(err) => assert(false, s"unexpected error $err") + ScalaIntegrationTest.Logger.info(s"${operation.name}: fetched ${result}") + assert(result.expiry.isDefined) + case Failure(err) => assert(false, s"unexpected error $err") } }) @@ -211,13 +223,13 @@ class KeyValueExpirySpec extends ScalaIntegrationTest { // After a sleep the doc should be gone operations.foreach(operation => { - ScalaIntegrationTest.Logger.info(s"${operation.name}: fetching after sleep") + ScalaIntegrationTest.Logger.info(s"${operation.name}: 
fetching after sleep") - coll.get(operation.docId) match { + coll.get(operation.docId) match { case Failure(x: DocumentNotFoundException) => - case x => - ScalaIntegrationTest.Logger.info(s"${operation.name}: fetched after sleep $x") - assert(false, s"Unexpected result $x") + case x => + ScalaIntegrationTest.Logger.info(s"${operation.name}: fetched after sleep $x") + assert(false, s"Unexpected result $x") } }) } diff --git a/scala-client/src/integrationTest/scala/com/couchbase/client/scala/util/ScalaIntegrationTest.scala b/scala-client/src/integrationTest/scala/com/couchbase/client/scala/util/ScalaIntegrationTest.scala index 731287cae..fd6206a71 100644 --- a/scala-client/src/integrationTest/scala/com/couchbase/client/scala/util/ScalaIntegrationTest.scala +++ b/scala-client/src/integrationTest/scala/com/couchbase/client/scala/util/ScalaIntegrationTest.scala @@ -44,7 +44,7 @@ import scala.jdk.CollectionConverters._ * @since 3.0.0 */ object ScalaIntegrationTest { - val Logger = LoggerFactory.getLogger(classOf[ScalaIntegrationTest]) + val Logger = LoggerFactory.getLogger(classOf[ScalaIntegrationTest]) } // Temporarily increased timeout to (possibly) workaround MB-37011 when Developer Preview enabled diff --git a/scala-client/src/main/scala/com/couchbase/client/scala/AsyncCollection.scala b/scala-client/src/main/scala/com/couchbase/client/scala/AsyncCollection.scala index c9fb0636e..a8cda278d 100644 --- a/scala-client/src/main/scala/com/couchbase/client/scala/AsyncCollection.scala +++ b/scala-client/src/main/scala/com/couchbase/client/scala/AsyncCollection.scala @@ -17,7 +17,12 @@ package com.couchbase.client.scala import com.couchbase.client.core.annotation.SinceCouchbase import com.couchbase.client.core.api.CoreCouchbaseOps -import com.couchbase.client.core.api.kv.{CoreExpiry, CoreSubdocGetCommand, CoreSubdocGetResult} +import com.couchbase.client.core.api.kv.{ + CoreExpiry, + CoreReadPreference, + CoreSubdocGetCommand, + CoreSubdocGetResult +} import com.couchbase.client.core.api.shared.CoreMutationState import com.couchbase.client.core.cnc.RequestSpan import com.couchbase.client.core.endpoint.http.CoreCommonOptions @@ -57,11 +62,11 @@ private[scala] case class HandlerParams( collectionIdentifier: CollectionIdentifier, env: ClusterEnvironment ) { - def tracer = env.coreEnv.requestTracer() + def tracer = core.coreResources.requestTracer } private[scala] case class HandlerBasicParams(core: Core) { - def tracer = core.context.environment.requestTracer + def tracer = core.context.coreResources.requestTracer } /** Provides asynchronous access to all collection APIs, based around Scala `Future`s. This is the main entry-point @@ -490,9 +495,9 @@ class AsyncCollection( id: String, timeout: Duration = kvReadTimeout ): Future[GetReplicaResult] = { - convert(kvOps.getAnyReplicaReactive(makeCommonOptions(timeout), id)) - .map(result => convertReplica(result, environment, None)) - .toFuture + convert( + kvOps.getAnyReplicaReactive(makeCommonOptions(timeout), id, CoreReadPreference.NO_PREFERENCE) + ).map(result => convertReplica(result, environment, None)).toFuture } /** Retrieves any available version of the document. 
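Reviewer note on the AsyncCollection hunks above: the replica-read paths now pass CoreReadPreference.NO_PREFERENCE explicitly, but the public signatures callers see are unchanged. A minimal sketch of the async any-replica read, assuming a reachable cluster; the connection details and keyspace names are illustrative placeholders, not part of this change:

import com.couchbase.client.scala.Cluster
import com.couchbase.client.scala.json.JsonObject
import scala.concurrent.Await
import scala.concurrent.duration._

object AnyReplicaSketch extends App {
  // Connection details and keyspace names are assumptions for the sketch.
  val cluster    = Cluster.connect("couchbase://127.0.0.1", "Administrator", "password").get
  val collection = cluster.bucket("travel-sample").scope("inventory").collection("airline").async

  // getAnyReplica returns whichever copy (active or replica) answers first;
  // the new CoreReadPreference argument is supplied internally and is not exposed at this call site.
  val replicaRead = Await.result(collection.getAnyReplica("airline_10"), 10.seconds)
  println(s"isReplica=${replicaRead.isReplica} content=${replicaRead.contentAs[JsonObject]}")

  cluster.disconnect()
}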
@@ -502,7 +507,7 @@ class AsyncCollection( id: String, options: GetAnyReplicaOptions ): Future[GetReplicaResult] = { - convert(kvOps.getAnyReplicaReactive(convert(options), id)) + convert(kvOps.getAnyReplicaReactive(convert(options), id, CoreReadPreference.NO_PREFERENCE)) .map(result => convertReplica(result, environment, options.transcoder)) .toFuture } @@ -538,7 +543,7 @@ class AsyncCollection( // Since the API here returns a Seq and not a Future, there is unfortunately // no option but to block & buffer the stream and return already completed/failed Futures. // Users that require a true streaming solution should use the reactive version. - convert(kvOps.getAllReplicasReactive(convert(options), id)) + convert(kvOps.getAllReplicasReactive(convert(options), id, CoreReadPreference.NO_PREFERENCE)) .map(result => convertReplica(result, environment, options.transcoder)) .collectSeq() .block(options.timeout) @@ -575,9 +580,14 @@ class AsyncCollection( spec: collection.Seq[LookupInSpec], options: LookupInAnyReplicaOptions ): Future[LookupInReplicaResult] = { - convert(kvOps.subdocGetAnyReplicaReactive(convert(options), id, LookupInSpec.map(spec).asJava)) - .map(result => convertLookupInReplica(result, environment)) - .toFuture + convert( + kvOps.subdocGetAnyReplicaReactive( + convert(options), + id, + LookupInSpec.map(spec).asJava, + CoreReadPreference.NO_PREFERENCE + ) + ).map(result => convertLookupInReplica(result, environment)).toFuture } /** SubDocument lookups allow retrieving parts of a JSON document directly, which may be more efficient than @@ -618,8 +628,14 @@ class AsyncCollection( spec: collection.Seq[LookupInSpec], options: LookupInAllReplicasOptions ): Seq[Future[LookupInReplicaResult]] = { - convert(kvOps.subdocGetAllReplicasReactive(convert(options), id, LookupInSpec.map(spec).asJava)) - .map(result => convertLookupInReplica(result, environment)) + convert( + kvOps.subdocGetAllReplicasReactive( + convert(options), + id, + LookupInSpec.map(spec).asJava, + CoreReadPreference.NO_PREFERENCE + ) + ).map(result => convertLookupInReplica(result, environment)) .collectSeq() .block(options.timeout) .map(result => Future.successful(result)) diff --git a/scala-client/src/main/scala/com/couchbase/client/scala/ReactiveCollection.scala b/scala-client/src/main/scala/com/couchbase/client/scala/ReactiveCollection.scala index 911f9163c..e66221db4 100644 --- a/scala-client/src/main/scala/com/couchbase/client/scala/ReactiveCollection.scala +++ b/scala-client/src/main/scala/com/couchbase/client/scala/ReactiveCollection.scala @@ -17,7 +17,7 @@ package com.couchbase.client.scala import com.couchbase.client.core.annotation.SinceCouchbase -import com.couchbase.client.core.api.kv.CoreExpiry +import com.couchbase.client.core.api.kv.{CoreExpiry, CoreReadPreference} import com.couchbase.client.core.io.CollectionIdentifier import com.couchbase.client.scala.codec.JsonSerializer import com.couchbase.client.scala.durability.Durability @@ -399,7 +399,12 @@ class ReactiveCollection(async: AsyncCollection) { ): SMono[LookupInReplicaResult] = { convert( kvOps - .subdocGetAnyReplicaReactive(makeCommonOptions(timeout), id, LookupInSpec.map(spec).asJava) + .subdocGetAnyReplicaReactive( + makeCommonOptions(timeout), + id, + LookupInSpec.map(spec).asJava, + CoreReadPreference.NO_PREFERENCE + ) ).map(result => convertLookupInReplica(result, environment)) } @@ -421,8 +426,14 @@ class ReactiveCollection(async: AsyncCollection) { spec: collection.Seq[LookupInSpec], options: LookupInAnyReplicaOptions ): 
SMono[LookupInReplicaResult] = { - convert(kvOps.subdocGetAnyReplicaReactive(convert(options), id, LookupInSpec.map(spec).asJava)) - .map(result => convertLookupInReplica(result, environment)) + convert( + kvOps.subdocGetAnyReplicaReactive( + convert(options), + id, + LookupInSpec.map(spec).asJava, + CoreReadPreference.NO_PREFERENCE + ) + ).map(result => convertLookupInReplica(result, environment)) } /** SubDocument lookups allow retrieving parts of a JSON document directly, which may be more efficient than @@ -445,7 +456,12 @@ class ReactiveCollection(async: AsyncCollection) { ): SFlux[LookupInReplicaResult] = { convert( kvOps - .subdocGetAllReplicasReactive(makeCommonOptions(timeout), id, LookupInSpec.map(spec).asJava) + .subdocGetAllReplicasReactive( + makeCommonOptions(timeout), + id, + LookupInSpec.map(spec).asJava, + CoreReadPreference.NO_PREFERENCE + ) ).map(result => convertLookupInReplica(result, environment)) } @@ -467,8 +483,14 @@ class ReactiveCollection(async: AsyncCollection) { spec: collection.Seq[LookupInSpec], options: LookupInAllReplicasOptions ): SFlux[LookupInReplicaResult] = { - convert(kvOps.subdocGetAllReplicasReactive(convert(options), id, LookupInSpec.map(spec).asJava)) - .map(result => convertLookupInReplica(result, environment)) + convert( + kvOps.subdocGetAllReplicasReactive( + convert(options), + id, + LookupInSpec.map(spec).asJava, + CoreReadPreference.NO_PREFERENCE + ) + ).map(result => convertLookupInReplica(result, environment)) } /** Retrieves any available version of the document. @@ -478,8 +500,9 @@ class ReactiveCollection(async: AsyncCollection) { id: String, timeout: Duration = kvReadTimeout ): SMono[GetReplicaResult] = { - convert(kvOps.getAnyReplicaReactive(makeCommonOptions(timeout), id)) - .map(result => convertReplica(result, environment, None)) + convert( + kvOps.getAnyReplicaReactive(makeCommonOptions(timeout), id, CoreReadPreference.NO_PREFERENCE) + ).map(result => convertReplica(result, environment, None)) } /** Retrieves any available version of the document. @@ -489,7 +512,7 @@ class ReactiveCollection(async: AsyncCollection) { id: String, options: GetAnyReplicaOptions ): SMono[GetReplicaResult] = { - convert(kvOps.getAnyReplicaReactive(convert(options), id)) + convert(kvOps.getAnyReplicaReactive(convert(options), id, CoreReadPreference.NO_PREFERENCE)) .map(result => convertReplica(result, environment, options.transcoder)) } @@ -500,8 +523,9 @@ class ReactiveCollection(async: AsyncCollection) { id: String, timeout: Duration = kvReadTimeout ): SFlux[GetReplicaResult] = { - convert(kvOps.getAllReplicasReactive(makeCommonOptions(timeout), id)) - .map(result => convertReplica(result, environment, None)) + convert( + kvOps.getAllReplicasReactive(makeCommonOptions(timeout), id, CoreReadPreference.NO_PREFERENCE) + ).map(result => convertReplica(result, environment, None)) } /** Retrieves all available versions of the document. 
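Same pattern on the reactive side: ReactiveCollection's getAllReplicas and lookupIn*Replica variants gain the extra CoreReadPreference argument internally while still returning an SFlux/SMono to the caller. A rough usage sketch, again with placeholder connection details and blocking only for demonstration (collectSeq/block mirrors what the async wrapper in this diff does):

import com.couchbase.client.scala.Cluster
import com.couchbase.client.scala.json.JsonObject

object AllReplicasReactiveSketch extends App {
  // Placeholders: connection string, credentials and keyspace are assumptions.
  val cluster  = Cluster.connect("couchbase://127.0.0.1", "Administrator", "password").get
  val reactive = cluster.bucket("travel-sample").scope("inventory").collection("airline").reactive

  // getAllReplicas streams one GetReplicaResult per reachable copy (active plus replicas);
  // under the hood it now calls kvOps.getAllReplicasReactive(..., CoreReadPreference.NO_PREFERENCE).
  val copies = reactive
    .getAllReplicas("airline_10")
    .collectSeq() // gather the stream for the demo; compose the SFlux directly in real reactive code
    .block()
  copies.foreach(r => println(s"isReplica=${r.isReplica} content=${r.contentAs[JsonObject]}"))

  cluster.disconnect()
}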
@@ -511,7 +535,7 @@ class ReactiveCollection(async: AsyncCollection) { id: String, options: GetAllReplicasOptions ): SFlux[GetReplicaResult] = { - convert(kvOps.getAllReplicasReactive(convert(options), id)) + convert(kvOps.getAllReplicasReactive(convert(options), id, CoreReadPreference.NO_PREFERENCE)) .map(result => convertReplica(result, environment, options.transcoder)) } diff --git a/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncCollectionQueryIndexManager.scala b/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncCollectionQueryIndexManager.scala index c15b1300a..ab1595540 100644 --- a/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncCollectionQueryIndexManager.scala +++ b/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncCollectionQueryIndexManager.scala @@ -46,7 +46,7 @@ class AsyncCollectionQueryIndexManager( ) { private[scala] val internal = new CoreCollectionQueryIndexManager( collection.couchbaseOps.queryOps(), - collection.environment.core.requestTracer, + collection.core.coreResources.requestTracer, keyspace ) private[scala] val DefaultTimeout: Duration = diff --git a/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncQueryIndexManager.scala b/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncQueryIndexManager.scala index d7185395a..a713a0d39 100644 --- a/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncQueryIndexManager.scala +++ b/scala-client/src/main/scala/com/couchbase/client/scala/manager/query/AsyncQueryIndexManager.scala @@ -55,7 +55,7 @@ class AsyncQueryIndexManager(private[scala] val cluster: AsyncCluster)( ) { private[scala] val internal = new CoreQueryIndexManager( cluster.couchbaseOps.queryOps(), - cluster.env.core.requestTracer() + cluster.core.coreResources.requestTracer ) private[scala] val DefaultTimeout = diff --git a/scala-fit-performer/pom.xml b/scala-fit-performer/pom.xml index 78ce4b4ef..a5702f05a 100644 --- a/scala-fit-performer/pom.xml +++ b/scala-fit-performer/pom.xml @@ -5,7 +5,7 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 fit-performer-scala diff --git a/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/search/SearchHelper.scala b/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/search/SearchHelper.scala index 68e44f823..6cef1a97c 100644 --- a/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/search/SearchHelper.scala +++ b/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/search/SearchHelper.scala @@ -219,14 +219,11 @@ object SearchHelper { opts = opts.facets(facets) } if (o.hasTimeoutMillis) { - // [start:1.4.5] + // [if:1.4.5] opts = opts.timeout(Duration(o.getTimeoutMillis, TimeUnit.MILLISECONDS)) - // [end:1.4.5] - // [start:<1.4.5] - /* - throw new UnsupportedOperationException() - // [end:<1.4.5] - */ + // [else] + //? 
throw new UnsupportedOperationException() + // [end] } if (o.hasParentSpanId) throw new UnsupportedOperationException() if (o.getRawCount > 0) opts = opts.raw(o.getRawMap.asScala.toMap) @@ -453,7 +450,7 @@ object SearchHelper { result } - // [start:1.6.0] + // [if:1.6.0] def handleSearchBlocking( cluster: Cluster, command: com.couchbase.client.protocol.sdk.search.SearchWrapper @@ -533,8 +530,17 @@ object SearchHelper { def convertVectorQuery( vq: com.couchbase.client.protocol.sdk.search.VectorQuery ): com.couchbase.client.scala.search.vector.VectorQuery = { - val query: Array[Float] = vq.getVectorQueryList.asScala.toArray.map(v => v.asInstanceOf[Float]) - var out = com.couchbase.client.scala.search.vector.VectorQuery(vq.getVectorFieldName, query) + // [if:1.6.2] + var out = if (vq.hasBase64VectorQuery) { + com.couchbase.client.scala.search.vector.VectorQuery(vq.getVectorFieldName, vq.getBase64VectorQuery) + } else { + val query: Array[Float] = vq.getVectorQueryList.asScala.toArray.map(v => v.asInstanceOf[Float]) + com.couchbase.client.scala.search.vector.VectorQuery(vq.getVectorFieldName, query) + } + // [else] + //? val query: Array[Float] = vq.getVectorQueryList.asScala.toArray.map(v => v.asInstanceOf[Float]) + //? var out = com.couchbase.client.scala.search.vector.VectorQuery(vq.getVectorFieldName, query) + // [end] if (vq.hasOptions) { val opts = vq.getOptions if (opts.hasNumCandidates) out = out.numCandidates(opts.getNumCandidates) @@ -561,7 +567,7 @@ object SearchHelper { } } - // [end:1.6.0] + // [end] private def convertResult( result: SearchResult, @@ -732,7 +738,7 @@ object SearchHelper { } else if (command.hasUpsertIndex) { val req = command.getUpsertIndex - // [start:1.4.5] + // [if:1.4.5] val converted = SearchIndex.fromJson(req.getIndexDefinition.toStringUtf8).get result.setInitiated(getTimeNow) val start = System.nanoTime @@ -747,12 +753,9 @@ object SearchHelper { .get result.setElapsedNanos(System.nanoTime - start) setSuccess(result) - // [end:1.4.5] - // [start:<1.4.5] - /* - throw new UnsupportedOperationException() - // [end:<1.4.5] - */ + // [else] + //? 
throw new UnsupportedOperationException() + // [end] } else if (command.hasDropIndex) { val req = command.getDropIndex diff --git a/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/Capabilities.scala b/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/Capabilities.scala index 55da511f5..d16c07c47 100644 --- a/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/Capabilities.scala +++ b/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/Capabilities.scala @@ -74,6 +74,10 @@ object Capabilities { out.add(Caps.SDK_VECTOR_SEARCH) // [end:1.6.0] + // [start:1.6.2] + out.add(Caps.SDK_VECTOR_SEARCH_BASE64) + // [end:1.6.2] + out } diff --git a/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/OptionsUtil.scala b/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/OptionsUtil.scala index fa4e31f49..c3d120f50 100644 --- a/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/OptionsUtil.scala +++ b/scala-fit-performer/src/main/scala/com/couchbase/client/performer/scala/util/OptionsUtil.scala @@ -390,6 +390,7 @@ object OptionsUtil { if (qo.hasPipelineCap) queryOptions = queryOptions.pipelineCap(qo.getPipelineCap) if (qo.hasPipelineBatch) queryOptions = queryOptions.pipelineBatch(qo.getPipelineBatch) if (qo.hasScanCap) queryOptions = queryOptions.scanCap(qo.getScanCap) + if (qo.hasClientContextId) queryOptions = queryOptions.clientContextId(qo.getClientContextId) } Option(queryOptions) } diff --git a/scala-implicits/pom.xml b/scala-implicits/pom.xml index 1462ac613..bff11bdc6 100644 --- a/scala-implicits/pom.xml +++ b/scala-implicits/pom.xml @@ -7,11 +7,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 scala-implicits_${scala.compat.version} - 1.7.3 + 1.7.6 jar Couchbase Scala SDK Implicits diff --git a/test-utils/pom.xml b/test-utils/pom.xml index bb8e8ee4d..ded5db1f5 100644 --- a/test-utils/pom.xml +++ b/test-utils/pom.xml @@ -6,12 +6,12 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 Couchbase (Integration) Test Utilities test-utils - 1.7.3 + 1.7.6 diff --git a/tracing-micrometer-observation/pom.xml b/tracing-micrometer-observation/pom.xml index 6fd55d158..977e94a2b 100644 --- a/tracing-micrometer-observation/pom.xml +++ b/tracing-micrometer-observation/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 tracing-micrometer-observation - 1.5.3 + 1.5.6 Micrometer Observation Interoperability Provides interoperability with Micrometer Observation diff --git a/tracing-micrometer-observation/src/integrationTest/resources/integration.properties b/tracing-micrometer-observation/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/tracing-micrometer-observation/src/integrationTest/resources/integration.properties +++ b/tracing-micrometer-observation/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/tracing-opentelemetry-deps/pom.xml b/tracing-opentelemetry-deps/pom.xml index 5e757d4ec..8b8076bc1 100644 --- a/tracing-opentelemetry-deps/pom.xml +++ b/tracing-opentelemetry-deps/pom.xml @@ -9,7 +9,7 @@ com.couchbase.client tracing-opentelemetry-deps - 
1.5.3 + 1.5.6 jar OpenTelemetry Interoperability Dependencies diff --git a/tracing-opentelemetry/pom.xml b/tracing-opentelemetry/pom.xml index 306d12423..94b293fef 100644 --- a/tracing-opentelemetry/pom.xml +++ b/tracing-opentelemetry/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 tracing-opentelemetry - 1.5.3 + 1.5.6 OpenTelemetry Interoperability Provides interoperability with OpenTelemetry @@ -24,7 +24,7 @@ com.couchbase.client tracing-opentelemetry-deps - 1.5.3 + 1.5.6 diff --git a/tracing-opentelemetry/src/integrationTest/resources/integration.properties b/tracing-opentelemetry/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/tracing-opentelemetry/src/integrationTest/resources/integration.properties +++ b/tracing-opentelemetry/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port diff --git a/tracing-opentracing/pom.xml b/tracing-opentracing/pom.xml index 3882afb80..8b223b7a0 100644 --- a/tracing-opentracing/pom.xml +++ b/tracing-opentracing/pom.xml @@ -6,11 +6,11 @@ com.couchbase.client couchbase-jvm-clients - 1.16.3 + 1.16.6 tracing-opentracing - 1.5.3 + 1.5.6 OpenTracing Interoperability Provides interoperability with OpenTracing diff --git a/tracing-opentracing/src/integrationTest/resources/integration.properties b/tracing-opentracing/src/integrationTest/resources/integration.properties index cf416415f..f48ab5a3d 100644 --- a/tracing-opentracing/src/integrationTest/resources/integration.properties +++ b/tracing-opentracing/src/integrationTest/resources/integration.properties @@ -11,7 +11,7 @@ cluster.adminPassword=password # Default configs for the mocked environment cluster.mocked.numNodes=1 -cluster.mocked.numReplicas=1 +cluster.mocked.numReplicas=0 # Entry point configuration if not managed # value of hostname and ns_server port
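Two performer hunks further up track small public-API additions and may be easier to read with a concrete sketch: OptionsUtil now forwards clientContextId onto QueryOptions, and (behind the 1.6.2 version markers) VectorQuery can be built from a base64-encoded vector as well as a float array. The index/field names, base64 payload and connection details below are placeholders:

import com.couchbase.client.scala.Cluster
import com.couchbase.client.scala.query.QueryOptions
import com.couchbase.client.scala.search.vector.VectorQuery

object OptionSketches extends App {
  // Placeholder connection details.
  val cluster = Cluster.connect("couchbase://127.0.0.1", "Administrator", "password").get

  // clientContextId tags the request so it can be correlated in server logs and traces.
  val queryResult = cluster.query("SELECT RAW 1", QueryOptions().clientContextId("example-context-id"))
  println(queryResult.map(_.metaData.clientContextId))

  // VectorQuery from explicit floats, and (since SDK 1.6.2) from a base64-encoded float vector.
  val fromFloats = VectorQuery("embedding_field", Array(0.1f, 0.2f, 0.3f))
  val fromBase64 = VectorQuery("embedding_field", "AAAAPwAAAD8=")
  // Either form is then wrapped in a vector SearchRequest, as SearchHelper.convertVectorQuery does above.

  cluster.disconnect()
}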