entry 'jetty-server'
entry 'jetty-servlet'
}
- dependency('org.elasticsearch.client:transport:5.6.3') {
+ dependency('org.elasticsearch.client:transport:6.6.2') {
exclude 'org.elasticsearch.plugin:lang-mustache-client'
- exclude 'org.elasticsearch.plugin:transport-netty3-client'
exclude 'commons-logging:commons-logging'
exclude 'org.elasticsearch.plugin:reindex-client'
+ exclude 'org.elasticsearch.plugin:rank-eval-client'
}
+ dependency('org.elasticsearch:mocksocket:1.0')
+ // As of this writing, version 6.6.2 of this module has not been published to Maven Central
+ dependency('org.codelibs.elasticsearch.module:analysis-common:6.6.1')
dependency 'org.freemarker:freemarker:2.3.20'
dependency 'org.hamcrest:hamcrest-all:1.3'
dependency 'org.mindrot:jbcrypt:0.4'
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.Lock;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.common.logging.Loggers;
import org.picocontainer.Startable;
+import org.sonar.api.utils.log.Logger;
+import org.sonar.api.utils.log.Loggers;
import org.sonar.ce.taskprocessor.CeWorker;
import org.sonar.ce.taskprocessor.CeWorkerFactory;
import org.sonar.process.cluster.hz.HazelcastMember;
* Provides the set of worker UUIDs in a clustered SonarQube instance
*/
public class CeDistributedInformationImpl implements CeDistributedInformation, Startable {
- private static final Logger LOGGER = Loggers.getLogger(CeDistributedInformationImpl.class);
+ private static final Logger LOGGER = Loggers.get(CeDistributedInformationImpl.class);
private final HazelcastMember hazelcastMember;
private final CeWorkerFactory ceCeWorkerFactory;
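A minimal sketch of the logging swap above (the Example class is hypothetical): the log4j-based Loggers.getLogger(Class) helper is no longer used, and callers go through the sonar-plugin-api facade, looked up with Loggers.get(Class).

import org.sonar.api.utils.log.Logger;
import org.sonar.api.utils.log.Loggers;

public class Example {
  private static final Logger LOG = Loggers.get(Example.class);

  public void run() {
    LOG.info("using the sonar-plugin-api logging facade");
  }
}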
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.Netty4Plugin;
import org.slf4j.Logger;
boolean connectedToOneHost = false;
for (HostAndPort hostAndPort : hostAndPorts) {
try {
- addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(hostAndPort.getHostText()), hostAndPort.getPortOrDefault(9001)));
+ addTransportAddress(new TransportAddress(InetAddress.getByName(hostAndPort.getHostText()), hostAndPort.getPortOrDefault(9001)));
connectedToOneHost = true;
} catch (UnknownHostException e) {
LOG.debug("Can not resolve host [" + hostAndPort.getHostText() + "]", e);
configurations {
tests
+
+ testCompile.extendsFrom tests
}
dependencies {
compileOnly 'com.google.code.findbugs:jsr305'
+ // "tests" dependencies are pulled by other modules which depends on "tests" configuration, "testCompile" are not pulled
+ tests 'org.codelibs.elasticsearch.module:analysis-common'
+ tests 'org.elasticsearch:mocksocket'
+
testCompile 'ch.qos.logback:logback-core'
testCompile 'com.google.code.findbugs:jsr305'
testCompile 'com.h2database:h2'
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.sort.SortOrder;
import org.sonar.api.utils.log.Logger;
import org.sonar.api.utils.log.Loggers;
while (true) {
SearchHit[] hits = searchResponse.getHits().getHits();
for (SearchHit hit : hits) {
- SearchHitField routing = hit.getField("_routing");
+ DocumentField routing = hit.field("_routing");
DeleteRequestBuilder deleteRequestBuilder = client.prepareDelete(hit.getIndex(), hit.getType(), hit.getId());
if (routing != null) {
deleteRequestBuilder.setRouting(routing.getValue());
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.join.ParentJoinPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
private static void addHostToClient(HostAndPort host, TransportClient client) {
try {
- client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host.getHostText()), host.getPortOrDefault(9001)));
+ client.addTransportAddress(new TransportAddress(InetAddress.getByName(host.getHostText()), host.getPortOrDefault(9001)));
} catch (UnknownHostException e) {
throw new IllegalStateException("Can not resolve host [" + host + "]", e);
}
public static <D extends BaseDoc> List<D> convertToDocs(SearchHits hits, Function<Map<String, Object>, D> converter) {
List<D> docs = new ArrayList<>();
for (SearchHit hit : hits.getHits()) {
- docs.add(converter.apply(hit.getSource()));
+ docs.add(converter.apply(hit.getSourceAsMap()));
}
return docs;
}
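A usage sketch for convertToDocs with the renamed accessor (assuming the IssueDoc(Map) constructor used by the tests further down): SearchHit.getSource() became getSourceAsMap() in ES 6.x, so converters still receive the document source as a Map<String, Object>.

import java.util.List;
import org.elasticsearch.action.search.SearchResponse;
import org.sonar.server.es.EsUtils;
import org.sonar.server.issue.index.IssueDoc;

public class ConvertToDocsSketch {
  static List<IssueDoc> issuesOf(SearchResponse response) {
    // each hit's getSourceAsMap() is fed to the IssueDoc constructor
    return EsUtils.convertToDocs(response.getHits(), IssueDoc::new);
  }
}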
static String of(BuiltIndex<?> index) {
IndexType.IndexMainType mainType = index.getMainType();
return of(
+ index.getSettings().toString(),
ImmutableMap.of(mainType.getIndex(), mainType),
index.getRelationTypes().stream().collect(uniqueIndex(IndexType.IndexRelationType::getName, t -> t)),
- index.getSettings().getAsMap(),
index.getAttributes());
}
- private static String of(Map... maps) {
- StringBuilder sb = new StringBuilder();
- for (Map map : maps) {
+ private static String of(String str, Map<?,?>... maps) {
+ StringBuilder sb = new StringBuilder(str);
+ for (Map<?,?> map : maps) {
appendMap(sb, map);
}
return DigestUtils.sha256Hex(sb.toString());
}
- private static void appendMap(StringBuilder sb, Map attributes) {
+ private static void appendMap(StringBuilder sb, Map<?,?> attributes) {
for (Object entry : sort(attributes).entrySet()) {
sb.append(((Map.Entry) entry).getKey());
sb.append(DELIMITER);
}
}
- private static SortedMap sort(Map map) {
+ private static SortedMap<?,?> sort(Map<?,?> map) {
if (map instanceof ImmutableSortedMap) {
- return (ImmutableSortedMap) map;
+ return (ImmutableSortedMap<?,?>) map;
}
return ImmutableSortedMap.copyOf(map);
}
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import static java.lang.Math.max;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
private static final int FACET_DEFAULT_MIN_DOC_COUNT = 1;
private static final int FACET_DEFAULT_SIZE = 10;
- private static final Order FACET_DEFAULT_ORDER = Terms.Order.count(false);
+ private static final BucketOrder FACET_DEFAULT_ORDER = BucketOrder.count(false);
/** In some cases the user selects >15 items for one facet. In that case, we want to calculate the doc count for all of them (not just the first 15 items, which would be the
* default for the TermsAggregation). */
private static final int MAXIMUM_NUMBER_OF_SELECTED_ITEMS_WHOSE_DOC_COUNT_WILL_BE_CALCULATED = 50;
private final QueryBuilder query;
private final Map<String, QueryBuilder> filters;
private final AbstractAggregationBuilder subAggregation;
- private final Order order;
+ private final BucketOrder order;
public StickyFacetBuilder(QueryBuilder query, Map<String, QueryBuilder> filters) {
this(query, filters, null, FACET_DEFAULT_ORDER);
}
- public StickyFacetBuilder(QueryBuilder query, Map<String, QueryBuilder> filters, @Nullable AbstractAggregationBuilder subAggregation, @Nullable Order order) {
+ public StickyFacetBuilder(QueryBuilder query, Map<String, QueryBuilder> filters, @Nullable AbstractAggregationBuilder subAggregation, @Nullable BucketOrder order) {
this.query = query;
this.filters = filters;
this.subAggregation = subAggregation;
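A quick sketch of the ordering rename used above: Terms.Order was replaced by BucketOrder in ES 6.x, Order.term(asc) became BucketOrder.key(asc), and compound orders are built the same way.

import org.elasticsearch.search.aggregations.BucketOrder;

public class BucketOrderSketch {
  static final BucketOrder BY_COUNT_DESC = BucketOrder.count(false);
  static final BucketOrder BY_KEY_ASC = BucketOrder.key(true);
  static final BucketOrder COMBINED = BucketOrder.compound(BY_COUNT_DESC, BY_KEY_ASC);
}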
import java.util.Optional;
import org.elasticsearch.action.get.GetRequestBuilder;
import org.elasticsearch.action.get.GetResponse;
-import org.elasticsearch.index.get.GetField;
+import org.elasticsearch.common.document.DocumentField;
import org.sonar.server.es.EsClient;
import org.sonar.server.es.Index;
import org.sonar.server.es.IndexType;
.setStoredFields(MetadataIndexDefinition.FIELD_VALUE);
GetResponse response = request.get();
if (response.isExists()) {
- GetField field = response.getField(MetadataIndexDefinition.FIELD_VALUE);
- String value = String.valueOf(field.getValue());
- return Optional.of(value);
+ DocumentField field = response.getField(MetadataIndexDefinition.FIELD_VALUE);
+ return Optional.of(field.getValue());
}
return Optional.empty();
}
set(TYPE, "nGram");
set(MIN_GRAM, MINIMUM_NGRAM_LENGTH);
set(MAX_GRAM, MAXIMUM_NGRAM_LENGTH);
- setArray("token_chars", "letter", "digit", "punctuation", "symbol");
+ setList("token_chars", "letter", "digit", "punctuation", "symbol");
}
},
set(TYPE, "nGram");
set(MIN_GRAM, MINIMUM_NGRAM_LENGTH);
set(MAX_GRAM, MAXIMUM_NGRAM_LENGTH);
- setArray("token_chars", "letter", "digit", "punctuation", "symbol");
+ setList("token_chars", "letter", "digit", "punctuation", "symbol");
}
},
PREFIX_TOKENIZER(TOKENIZER) {
@Override
protected void setup() {
set(TOKENIZER, KEYWORD);
- setArray(FILTER, TRIM, LOWERCASE);
+ setList(FILTER, TRIM, LOWERCASE);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, GRAM_TOKENIZER);
- setArray(FILTER, TRIM, LOWERCASE);
+ setList(FILTER, TRIM, LOWERCASE);
}
},
SEARCH_GRAMS_ANALYZER(ANALYZER) {
@Override
protected void setup() {
set(TOKENIZER, WHITESPACE);
- setArray(FILTER, TRIM, LOWERCASE);
+ setList(FILTER, TRIM, LOWERCASE);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, PREFIX_TOKENIZER);
- setArray(FILTER, TRIM);
+ setList(FILTER, TRIM);
}
},
SEARCH_PREFIX_ANALYZER(ANALYZER) {
@Override
protected void setup() {
set(TOKENIZER, WHITESPACE);
- setArray(FILTER, TRIM);
+ setList(FILTER, TRIM);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, PREFIX_TOKENIZER);
- setArray(FILTER, TRIM, LOWERCASE);
+ setList(FILTER, TRIM, LOWERCASE);
}
},
SEARCH_PREFIX_CASE_INSENSITIVE_ANALYZER(ANALYZER) {
@Override
protected void setup() {
set(TOKENIZER, WHITESPACE);
- setArray(FILTER, TRIM, LOWERCASE);
+ setList(FILTER, TRIM, LOWERCASE);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, WHITESPACE);
- setArray(FILTER, TRIM, LOWERCASE, NGRAM_FILTER.getName());
+ setList(FILTER, TRIM, LOWERCASE, NGRAM_FILTER.getName());
}
},
USER_SEARCH_GRAMS_ANALYZER(ANALYZER) {
@Override
protected void setup() {
set(TOKENIZER, WHITESPACE);
- setArray(FILTER, TRIM, LOWERCASE);
+ setList(FILTER, TRIM, LOWERCASE);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, STANDARD);
- setArray(FILTER, STANDARD, "word_filter", LOWERCASE, STOP, ASCIIFOLDING, PORTER_STEM);
+ setList(FILTER, STANDARD, "word_filter", LOWERCASE, STOP, ASCIIFOLDING, PORTER_STEM);
}
},
SEARCH_WORDS_ANALYZER(ANALYZER) {
@Override
protected void setup() {
set(TOKENIZER, STANDARD);
- setArray(FILTER, STANDARD, LOWERCASE, STOP, ASCIIFOLDING, PORTER_STEM);
+ setList(FILTER, STANDARD, LOWERCASE, STOP, ASCIIFOLDING, PORTER_STEM);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, STANDARD);
- setArray(FILTER, STANDARD, LOWERCASE, STOP, ASCIIFOLDING, PORTER_STEM);
- setArray(CHAR_FILTER, HTML_STRIP);
+ setList(FILTER, STANDARD, LOWERCASE, STOP, ASCIIFOLDING, PORTER_STEM);
+ setList(CHAR_FILTER, HTML_STRIP);
}
@Override
@Override
protected void setup() {
set(TOKENIZER, UUID_MODULE_TOKENIZER);
- setArray(FILTER, TRIM);
+ setList(FILTER, TRIM);
}
},
put(localName(settingSuffix), value);
}
- protected void setArray(String settingSuffix, String... values) {
- putArray(localName(settingSuffix), values);
+ protected void setList(String settingSuffix, String... values) {
+ putList(localName(settingSuffix), values);
}
- protected void setArray(String settingSuffix, DefaultIndexSettingsElement... values) {
- putArray(localName(settingSuffix), Arrays.stream(values).map(DefaultIndexSettingsElement::getName).toArray(String[]::new));
+ protected void setList(String settingSuffix, DefaultIndexSettingsElement... values) {
+ putList(localName(settingSuffix), Arrays.stream(values).map(DefaultIndexSettingsElement::getName).toArray(String[]::new));
}
private void put(String setting, String value) {
builder = builder.put(setting, value);
}
- private void putArray(String setting, String... values) {
- builder = builder.putArray(setting, values);
+ private void putList(String setting, String... values) {
+ builder = builder.putList(setting, values);
}
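A sketch of the rename above: Settings.Builder.putArray() became putList() in ES 6.x with the same semantics (the analyzer name my_analyzer is only illustrative).

import org.elasticsearch.common.settings.Settings;

public class PutListSketch {
  static Settings analyzerSettings() {
    return Settings.builder()
      .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace")
      .putList("index.analysis.analyzer.my_analyzer.filter", "trim", "lowercase")
      .build();
  }
}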
private String localName(String settingSuffix) {
private void applySettingsConfiguration(SettingsConfiguration settingsConfiguration) {
settings.put("index.mapper.dynamic", valueOf(false));
settings.put("index.refresh_interval", refreshInterval(settingsConfiguration));
- settings.put("mapping.single_type", valueOf(true));
Configuration config = settingsConfiguration.getConfiguration();
boolean clusterMode = config.getBoolean(CLUSTER_ENABLED.getKey()).orElse(false);
request.settings().toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject().endObject();
builder.prettyPrint();
- return builder.string();
+ return builder.toString();
} catch (IOException e) {
throw new RuntimeException(e);
}
package org.sonar.server.es.request;
import org.apache.commons.lang.StringUtils;
-import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.sonar.api.utils.log.Profiler;
}
@Override
- public PutMappingResponse get() {
+ public AcknowledgedResponse get() {
Profiler profiler = Profiler.createIfTrace(EsClient.LOGGER).start();
try {
return super.execute().actionGet();
}
@Override
- public PutMappingResponse get(TimeValue timeout) {
+ public AcknowledgedResponse get(TimeValue timeout) {
throw new IllegalStateException("Not yet implemented");
}
@Override
- public PutMappingResponse get(String timeout) {
+ public AcknowledgedResponse get(String timeout) {
throw new IllegalStateException("Not yet implemented");
}
@Override
- public ListenableActionFuture<PutMappingResponse> execute() {
+ public ActionFuture<AcknowledgedResponse> execute() {
throw new UnsupportedOperationException("execute() should not be called as it's used for asynchronous execution");
}
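A sketch of the response-type collapse above: in ES 6.x, PutMappingResponse (like DeleteIndexResponse and UpdateSettingsResponse elsewhere in this change) is folded into AcknowledgedResponse, so callers only check the acknowledged flag (my_index and my_type are placeholder names).

import java.util.Map;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;

public class AcknowledgedSketch {
  static void putMapping(Client client, Map<String, Object> mapping) {
    AcknowledgedResponse response = client.admin().indices()
      .preparePutMapping("my_index")
      .setType("my_type")
      .setSource(mapping)
      .get();
    if (!response.isAcknowledged()) {
      throw new IllegalStateException("put mapping was not acknowledged");
    }
  }
}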
import org.elasticsearch.join.query.JoinQueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
}
private static StickyFacetBuilder stickyFacetBuilder(QueryBuilder query, Map<String, QueryBuilder> filters) {
- return new StickyFacetBuilder(query, filters, null, Terms.Order.compound(Terms.Order.count(false), Terms.Order.term(true)));
+ return new StickyFacetBuilder(query, filters, null, BucketOrder.compound(BucketOrder.count(false), BucketOrder.key(true)));
}
private static void setSorting(RuleQuery query, SearchRequestBuilder esSearch) {
TermsAggregationBuilder termsAggregation = AggregationBuilders.terms(AGGREGATION_NAME_FOR_TAGS)
.field(FIELD_RULE_EXTENSION_TAGS)
.size(size)
- .order(Terms.Order.term(true))
+ .order(BucketOrder.key(true))
.minDocCount(1);
ofNullable(query)
.map(EsUtils::escapeSpecialRegexChars)
--- /dev/null
+/*
+ * SonarQube
+ * Copyright (C) 2009-2019 SonarSource SA
+ * mailto:info AT sonarsource DOT com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+package org.elasticsearch.transport;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cli.SuppressForbidden;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.concurrent.CompletableContext;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.CancellableThreads;
+import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.internal.io.IOUtils;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.mocksocket.MockServerSocket;
+import org.elasticsearch.mocksocket.MockSocket;
+import org.elasticsearch.threadpool.ThreadPool;
+
+/**
+ * This is a socket-based blocking TcpTransport implementation that is used for tests
+ * that need real networking. This implementation is a test-only implementation that implements
+ * the networking layer in the worst possible way since it blocks and uses a thread-per-request model.
+ */
+public class MockTcpTransport extends TcpTransport {
+ private static final Logger logger = LogManager.getLogger(MockTcpTransport.class);
+
+ /**
+ * A pre-built light connection profile that shares a single connection across all
+ * types.
+ */
+ static final ConnectionProfile LIGHT_PROFILE;
+
+ private final Set<MockChannel> openChannels = new HashSet<>();
+
+ static {
+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
+ builder.addConnections(1,
+ TransportRequestOptions.Type.BULK,
+ TransportRequestOptions.Type.PING,
+ TransportRequestOptions.Type.RECOVERY,
+ TransportRequestOptions.Type.REG,
+ TransportRequestOptions.Type.STATE);
+ LIGHT_PROFILE = builder.build();
+ }
+
+ private final ExecutorService executor;
+
+ public MockTcpTransport(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
+ this(settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService, Version.CURRENT);
+ }
+
+ public MockTcpTransport(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+ CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService, Version mockVersion) {
+ super("mock-tcp-transport", settings, mockVersion, threadPool, PageCacheRecycler.NON_RECYCLING_INSTANCE, circuitBreakerService,
+ namedWriteableRegistry, networkService);
+ // we have our own crazy cached threadpool; this one is not bounded at all...
+ // using the ES thread factory here is crucial for tests otherwise disruption tests won't block that thread
+ executor = Executors.newCachedThreadPool(EsExecutors.daemonThreadFactory(settings, Transports.TEST_MOCK_TRANSPORT_THREAD_PREFIX));
+ }
+
+ @Override
+ protected MockChannel bind(final String name, InetSocketAddress address) throws IOException {
+ MockServerSocket socket = new MockServerSocket();
+ socket.setReuseAddress(TransportSettings.TCP_REUSE_ADDRESS.get(settings));
+ ByteSizeValue tcpReceiveBufferSize = TransportSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings);
+ if (tcpReceiveBufferSize.getBytes() > 0) {
+ socket.setReceiveBufferSize(tcpReceiveBufferSize.bytesAsInt());
+ }
+ socket.bind(address);
+ MockChannel serverMockChannel = new MockChannel(socket, name);
+ CountDownLatch started = new CountDownLatch(1);
+ executor.execute(new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ onException(serverMockChannel, e);
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ started.countDown();
+ serverMockChannel.accept(executor);
+ }
+ });
+ try {
+ started.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ return serverMockChannel;
+ }
+
+ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOException {
+ Socket socket = mockChannel.activeChannel;
+ byte[] minimalHeader = new byte[TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE];
+ try {
+ input.readFully(minimalHeader);
+ } catch (EOFException eof) {
+ throw new IOException("Connection reset by peer");
+ }
+
+ // Reading the message length will throw a stream corrupted exception if the marker bytes are incorrect
+ int msgSize = TcpTransport.readMessageLength(new BytesArray(minimalHeader));
+ if (msgSize == -1) {
+ socket.getOutputStream().flush();
+ } else {
+ final byte[] buffer = new byte[msgSize];
+ input.readFully(buffer);
+ int expectedSize = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE + msgSize;
+ try (BytesStreamOutput output = new ReleasableBytesStreamOutput(expectedSize, bigArrays)) {
+ output.write(minimalHeader);
+ output.write(buffer);
+ consumeNetworkReads(mockChannel, output.bytes());
+ }
+ }
+ }
+
+ @Override
+ @SuppressForbidden(reason = "real socket for mocking remote connections")
+ protected MockChannel initiateChannel(DiscoveryNode node) throws IOException {
+ InetSocketAddress address = node.getAddress().address();
+ final MockSocket socket = new MockSocket();
+ final MockChannel channel = new MockChannel(socket, address, false, "none");
+
+ boolean success = false;
+ try {
+ configureSocket(socket);
+ success = true;
+ } finally {
+ if (success == false) {
+ IOUtils.close(socket);
+ }
+ }
+
+ executor.submit(() -> {
+ try {
+ socket.connect(address);
+ socket.setSoLinger(false, 0);
+ channel.connectFuture.complete(null);
+ channel.loopRead(executor);
+ } catch (Exception ex) {
+ channel.connectFuture.completeExceptionally(ex);
+ }
+ });
+
+ return channel;
+ }
+
+ @Override
+ protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile connectionProfile) {
+ ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
+ Set<TransportRequestOptions.Type> allTypesWithConnection = new HashSet<>();
+ Set<TransportRequestOptions.Type> allTypesWithoutConnection = new HashSet<>();
+ for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile.getHandles()) {
+ Set<TransportRequestOptions.Type> types = handle.getTypes();
+ if (handle.length > 0) {
+ allTypesWithConnection.addAll(types);
+ } else {
+ allTypesWithoutConnection.addAll(types);
+ }
+ }
+ // make sure we maintain at least the types that are supported by this profile even if we only use a single channel for them.
+ builder.addConnections(1, allTypesWithConnection.toArray(new TransportRequestOptions.Type[0]));
+ if (allTypesWithoutConnection.isEmpty() == false) {
+ builder.addConnections(0, allTypesWithoutConnection.toArray(new TransportRequestOptions.Type[0]));
+ }
+ builder.setHandshakeTimeout(connectionProfile.getHandshakeTimeout());
+ builder.setConnectTimeout(connectionProfile.getConnectTimeout());
+ builder.setPingInterval(connectionProfile.getPingInterval());
+ builder.setCompressionEnabled(connectionProfile.getCompressionEnabled());
+ return builder.build();
+ }
+
+ private void configureSocket(Socket socket) throws SocketException {
+ socket.setTcpNoDelay(TransportSettings.TCP_NO_DELAY.get(settings));
+ ByteSizeValue tcpSendBufferSize = TransportSettings.TCP_SEND_BUFFER_SIZE.get(settings);
+ if (tcpSendBufferSize.getBytes() > 0) {
+ socket.setSendBufferSize(tcpSendBufferSize.bytesAsInt());
+ }
+ ByteSizeValue tcpReceiveBufferSize = TransportSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings);
+ if (tcpReceiveBufferSize.getBytes() > 0) {
+ socket.setReceiveBufferSize(tcpReceiveBufferSize.bytesAsInt());
+ }
+ socket.setReuseAddress(TransportSettings.TCP_REUSE_ADDRESS.get(settings));
+ }
+
+ public final class MockChannel implements Closeable, TcpChannel, TcpServerChannel {
+ private final AtomicBoolean isOpen = new AtomicBoolean(true);
+ private final InetSocketAddress localAddress;
+ private final ServerSocket serverSocket;
+ private final Set<MockChannel> workerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
+ private final Socket activeChannel;
+ private final boolean isServer;
+ private final String profile;
+ private final CancellableThreads cancellableThreads = new CancellableThreads();
+ private final CompletableContext<Void> closeFuture = new CompletableContext<>();
+ private final CompletableContext<Void> connectFuture = new CompletableContext<>();
+ private final ChannelStats stats = new ChannelStats();
+
+ /**
+ * Constructs a new MockChannel instance intended for handling the actual incoming / outgoing traffic.
+ *
+ * @param socket The client socket. Must not be null.
+ * @param localAddress Address associated with the corresponding local server socket. Must not be null.
+ * @param isServer Whether this channel is the server side of an accepted connection.
+ * @param profile The associated profile name.
+ */
+ MockChannel(Socket socket, InetSocketAddress localAddress, boolean isServer, String profile) {
+ this.localAddress = localAddress;
+ this.activeChannel = socket;
+ this.isServer = isServer;
+ this.serverSocket = null;
+ this.profile = profile;
+ synchronized (openChannels) {
+ openChannels.add(this);
+ }
+ }
+
+ /**
+ * Constructs a new MockChannel instance intended for accepting requests.
+ *
+ * @param serverSocket The associated server socket. Must not be null.
+ * @param profile The associated profile name.
+ */
+ MockChannel(ServerSocket serverSocket, String profile) {
+ this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress();
+ this.serverSocket = serverSocket;
+ this.profile = profile;
+ this.isServer = false;
+ this.activeChannel = null;
+ synchronized (openChannels) {
+ openChannels.add(this);
+ }
+ }
+
+ public void accept(Executor executor) throws IOException {
+ while (isOpen.get()) {
+ Socket incomingSocket = serverSocket.accept();
+ MockChannel incomingChannel = null;
+ try {
+ configureSocket(incomingSocket);
+ synchronized (this) {
+ if (isOpen.get()) {
+ InetSocketAddress localAddress = new InetSocketAddress(incomingSocket.getLocalAddress(),
+ incomingSocket.getPort());
+ incomingChannel = new MockChannel(incomingSocket, localAddress, true, profile);
+ MockChannel finalIncomingChannel = incomingChannel;
+ incomingChannel.addCloseListener(new ActionListener<Void>() {
+ @Override
+ public void onResponse(Void aVoid) {
+ workerChannels.remove(finalIncomingChannel);
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ workerChannels.remove(finalIncomingChannel);
+ }
+ });
+ serverAcceptedChannel(incomingChannel);
+ // establish a happens-before edge between closing and accepting a new connection
+ workerChannels.add(incomingChannel);
+
+ // this spawns a new thread immediately, so OK under lock
+ incomingChannel.loopRead(executor);
+ // the channel is properly registered and will be cleared by the close code.
+ incomingSocket = null;
+ incomingChannel = null;
+ }
+ }
+ } finally {
+ // ensure we don't leak sockets and channels in the failure case. Note that we null both
+ // if there are no exceptions, so this becomes a no-op.
+ IOUtils.closeWhileHandlingException(incomingSocket, incomingChannel);
+ }
+ }
+ }
+
+ void loopRead(Executor executor) {
+ executor.execute(new AbstractRunnable() {
+ @Override
+ public void onFailure(Exception e) {
+ if (isOpen.get()) {
+ try {
+ onException(MockChannel.this, e);
+ } catch (Exception ex) {
+ logger.warn("failed on handling exception", ex);
+ IOUtils.closeWhileHandlingException(MockChannel.this); // pure paranoia
+ }
+ }
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ StreamInput input = new InputStreamStreamInput(new BufferedInputStream(activeChannel.getInputStream()));
+ // There is a (slim) chance that we get interrupted right after a loop iteration, so check explicitly
+ while (isOpen.get() && !Thread.currentThread().isInterrupted()) {
+ cancellableThreads.executeIO(() -> readMessage(MockChannel.this, input));
+ }
+ }
+ });
+ }
+
+ synchronized void close0() throws IOException {
+ // establish a happens-before edge between closing and accepting a new connection
+ // we have to sync this entire block to ensure that our openChannels checks work correctly.
+ // The close block below will close all worker channels but if one of the worker channels runs into an exception
+ // for instance due to a disconnect the handling of this exception might be executed concurrently.
+ // now if we are in-turn concurrently call close we might not wait for the actual close to happen and that will, down the road
+ // make the assertion trip that not all channels are closed.
+ if (isOpen.compareAndSet(true, false)) {
+ final boolean removedChannel;
+ synchronized (openChannels) {
+ removedChannel = openChannels.remove(this);
+ }
+ IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels),
+ () -> cancellableThreads.cancel("channel closed"));
+ assert removedChannel: "Channel was not removed or removed twice?";
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "MockChannel{" +
+ "profile='" + profile + '\'' +
+ ", isOpen=" + isOpen +
+ ", localAddress=" + localAddress +
+ ", isServerSocket=" + (serverSocket != null) +
+ '}';
+ }
+
+ @Override
+ public void close() {
+ try {
+ close0();
+ closeFuture.complete(null);
+ } catch (IOException e) {
+ closeFuture.completeExceptionally(e);
+ }
+ }
+
+ @Override
+ public String getProfile() {
+ return profile;
+ }
+
+ @Override
+ public boolean isServerChannel() {
+ return isServer;
+ }
+
+ @Override
+ public void addCloseListener(ActionListener<Void> listener) {
+ closeFuture.addListener(ActionListener.toBiConsumer(listener));
+ }
+
+ @Override
+ public void addConnectListener(ActionListener<Void> listener) {
+ connectFuture.addListener(ActionListener.toBiConsumer(listener));
+ }
+
+ @Override
+ public ChannelStats getChannelStats() {
+ return stats;
+ }
+
+ @Override
+ public boolean isOpen() {
+ return isOpen.get();
+ }
+
+ @Override
+ public InetSocketAddress getLocalAddress() {
+ return localAddress;
+ }
+
+ @Override
+ public InetSocketAddress getRemoteAddress() {
+ return (InetSocketAddress) activeChannel.getRemoteSocketAddress();
+ }
+
+ @Override
+ public void sendMessage(BytesReference reference, ActionListener<Void> listener) {
+ try {
+ synchronized (this) {
+ OutputStream outputStream = new BufferedOutputStream(activeChannel.getOutputStream());
+ reference.writeTo(outputStream);
+ outputStream.flush();
+ }
+ listener.onResponse(null);
+ } catch (IOException e) {
+ listener.onFailure(e);
+ onException(this, e);
+ }
+ }
+ }
+
+ @Override
+ protected void doStart() {
+ boolean success = false;
+ try {
+ if (NetworkService.NETWORK_SERVER.get(settings)) {
+ // loop through all profiles and start them up, special handling for default one
+ for (ProfileSettings profileSettings : profileSettings) {
+ bindServer(profileSettings);
+ }
+ }
+ super.doStart();
+ success = true;
+ } finally {
+ if (success == false) {
+ doStop();
+ }
+ }
+ }
+
+ @Override
+ protected void stopInternal() {
+ ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
+ synchronized (openChannels) {
+ assert openChannels.isEmpty() : "there are still open channels: " + openChannels;
+ }
+ }
+}
*/
package org.sonar.server.es;
-import java.util.Map;
+import org.elasticsearch.common.settings.Settings;
import org.junit.Test;
import org.sonar.server.es.newindex.DefaultIndexSettings;
import org.sonar.test.TestUtils;
@Test
public void defaults() {
- Map<String, String> map = DefaultIndexSettings.defaults().build().getAsMap();
- assertThat(map).isNotEmpty();
+ Settings settings = DefaultIndexSettings.defaults().build();
// test some values
- assertThat(map.get("index.number_of_shards")).isEqualTo("1");
- assertThat(map.get("index.analysis.analyzer." + SORTABLE_ANALYZER.getName() + ".tokenizer")).isEqualTo("keyword");
+ assertThat(settings.get("index.number_of_shards")).isEqualTo("1");
+ assertThat(settings.get("index.analysis.analyzer." + SORTABLE_ANALYZER.getName() + ".tokenizer")).isEqualTo("keyword");
}
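A sketch of the accessor change above: Settings.getAsMap() was removed in ES 6.x, so individual values are read with get(String) and, when iteration is needed, the flattened keys are available through keySet().

import org.elasticsearch.common.settings.Settings;

public class SettingsReadSketch {
  static String numberOfShards(Settings settings) {
    // get(String) returns the flattened value, or null when the key is absent
    return settings.get("index.number_of_shards");
  }
}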
@Test
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang.reflect.ConstructorUtils;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
-import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
-import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.join.ParentJoinPlugin;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.NetworkPlugin;
+import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.MockTcpTransport;
+import org.elasticsearch.transport.Transport;
import org.junit.rules.ExternalResource;
import org.sonar.server.component.index.ComponentIndexDefinition;
import org.sonar.server.es.IndexDefinition.IndexDefinitionContext;
List<SearchHit> hits = getDocuments(indexType);
return new ArrayList<>(Collections2.transform(hits, input -> {
try {
- return (E) ConstructorUtils.invokeConstructor(docClass, input.getSource());
+ return (E) ConstructorUtils.invokeConstructor(docClass, input.getSourceAsMap());
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
private void setIndexSettings(String index, Map<String, Object> settings) {
- UpdateSettingsResponse response = SHARED_NODE.client().admin().indices()
+ AcknowledgedResponse response = SHARED_NODE.client().admin().indices()
.prepareUpdateSettings(index)
.setSettings(settings)
.get();
private static void deleteIndexIfExists(String name) {
try {
- DeleteIndexResponse response = SHARED_NODE.client().admin().indices().prepareDelete(name).get();
+ AcknowledgedResponse response = SHARED_NODE.client().admin().indices().prepareDelete(name).get();
checkState(response.isAcknowledged(), "Fail to drop the index " + name);
} catch (IndexNotFoundException e) {
// ignore
// create types
String typeName = index.getMainType().getType();
- PutMappingResponse mappingResponse = SHARED_NODE.client().admin().indices().preparePutMapping(indexName)
+ AcknowledgedResponse mappingResponse = SHARED_NODE.client().admin().indices().preparePutMapping(indexName)
.setType(typeName)
.setSource(index.getAttributes())
.get();
// from failing on nodes without enough disk space
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b")
+ .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "1b")
// always reduce this - it can make tests really slow
.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(20))
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "single-node")
.build();
- Node node = new TesterNode(settings);
+ Node node = new Node(InternalSettingsPreparer.prepareEnvironment(settings, null),
+ ImmutableList.of(
+ CommonAnalysisPlugin.class,
+ // mock local transport plugin
+ MockTcpTransportPlugin.class,
+ // install ParentJoin plugin required to create field of type "join"
+ ParentJoinPlugin.class),
+ true) {
+ @Override
+ protected void registerDerivedNodeNameWithLogger(String nodeName) {
+ // nothing to do
+ }
+ };
return node.start();
} catch (Exception e) {
throw new IllegalStateException("Fail to start embedded Elasticsearch", e);
}
}
- private static class TesterNode extends Node {
- public TesterNode(Settings preparedSettings) {
- super(
- InternalSettingsPreparer.prepareEnvironment(preparedSettings, null),
- ImmutableList.of(
- // install ParentJoin plugin required to create field of type "join"
- ParentJoinPlugin.class));
+ public static final class MockTcpTransportPlugin extends Plugin implements NetworkPlugin {
+ @Override
+ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler,
+ CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) {
+ return Collections.singletonMap(
+ "local",
+ () -> new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, circuitBreakerService, namedWriteableRegistry, networkService));
}
}
}
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.junit.Test;
-import org.mockito.Mockito;
import org.sonar.server.issue.index.IssueDoc;
import org.sonar.test.TestUtils;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import static org.sonar.server.es.EsUtils.escapeSpecialRegexChars;
public class EsUtilsTest {
@Test
public void convertToDocs_empty() {
- SearchHits hits = mock(SearchHits.class, Mockito.RETURNS_MOCKS);
+ SearchHits hits = new SearchHits(new SearchHit[] {}, 0, 0);
List<BaseDoc> docs = EsUtils.convertToDocs(hits, IssueDoc::new);
assertThat(docs).isEmpty();
}
@Test
public void convertToDocs() {
- SearchHits hits = mock(SearchHits.class, Mockito.RETURNS_MOCKS);
- when(hits.getHits()).thenReturn(new SearchHit[] {mock(SearchHit.class)});
+ SearchHits hits = new SearchHits(new SearchHit[] {new SearchHit(16)}, 1, 1);
List<BaseDoc> docs = EsUtils.convertToDocs(hits, IssueDoc::new);
assertThat(docs).hasSize(1);
}
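A hedged fixture sketch mirroring the test change above: the SearchHits(SearchHit[], long, float) and SearchHit(int) constructors are public in ES 6.x, which lets these tests build real objects instead of Mockito mocks.

import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

public class SearchHitsFixture {
  static SearchHits empty() {
    return new SearchHits(new SearchHit[0], 0, 0);
  }

  static SearchHits single() {
    // 16 is an arbitrary docId, as in the test above
    return new SearchHits(new SearchHit[] {new SearchHit(16)}, 1, 1);
  }
}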
assertThat(underTest.get("index.number_of_shards")).isNotEmpty();
assertThat(underTest.get("index.mapper.dynamic")).isEqualTo("false");
assertThat(underTest.get("index.refresh_interval")).isEqualTo("30s");
+ // setting "mapping.single_type" has been dropped in 6.X because multi type indices are not supported anymore
+ assertThat(underTest.get("mapping.single_type")).isNull();
assertThat(underTest.get("index.number_of_shards")).isEqualTo("1");
assertThat(underTest.get("index.number_of_replicas")).isEqualTo("0");
}
assertThat(underTest.get("index.number_of_shards")).isNotEmpty();
assertThat(underTest.get("index.mapper.dynamic")).isEqualTo("false");
assertThat(underTest.get("index.refresh_interval")).isEqualTo("30s");
+ // setting "mapping.single_type" has been dropped in 6.X because multi type indices are not supported anymore
+ assertThat(underTest.get("mapping.single_type")).isNull();
assertThat(underTest.get("index.number_of_shards")).isEqualTo("1");
assertThat(underTest.get("index.number_of_replicas")).isEqualTo("1");
}
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregationBuilders;
-import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator.KeyedFilter;
-import org.elasticsearch.search.aggregations.bucket.filters.InternalFilters;
-import org.elasticsearch.search.aggregations.bucket.filters.InternalFilters.InternalBucket;
+import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters.InternalBucket;
import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits;
import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings;
import org.picocontainer.Startable;
// create types
LOGGER.info("Create type {}", builtIndex.getMainType().format());
- PutMappingResponse mappingResponse = client.preparePutMapping(index)
+ AcknowledgedResponse mappingResponse = client.preparePutMapping(index)
.setType(builtIndex.getMainType().getType())
.setSource(builtIndex.getAttributes())
.get();
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.HasAggregations;
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
import org.elasticsearch.search.aggregations.metrics.min.Min;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
private static final String FACET_SUFFIX_MISSING = "_missing";
private static final String IS_ASSIGNED_FILTER = "__isAssigned";
private static final SumAggregationBuilder EFFORT_AGGREGATION = AggregationBuilders.sum(FACET_MODE_EFFORT).field(FIELD_ISSUE_EFFORT);
- private static final Order EFFORT_AGGREGATION_ORDER = Order.aggregation(FACET_MODE_EFFORT, false);
+ private static final BucketOrder EFFORT_AGGREGATION_ORDER = BucketOrder.aggregation(FACET_MODE_EFFORT, false);
private static final Duration TWENTY_DAYS = Duration.standardDays(20L);
private static final Duration TWENTY_WEEKS = Duration.standardDays(20L * 7L);
private static final Duration TWENTY_MONTHS = Duration.standardDays(20L * 30L);
}
public List<String> searchTags(IssueQuery query, @Nullable String textQuery, int size) {
- Terms terms = listTermsMatching(FIELD_ISSUE_TAGS, query, textQuery, Terms.Order.term(true), size);
+ Terms terms = listTermsMatching(FIELD_ISSUE_TAGS, query, textQuery, BucketOrder.key(true), size);
return EsUtils.termsKeys(terms);
}
public Map<String, Long> countTags(IssueQuery query, int maxNumberOfTags) {
- Terms terms = listTermsMatching(FIELD_ISSUE_TAGS, query, null, Terms.Order.count(false), maxNumberOfTags);
+ Terms terms = listTermsMatching(FIELD_ISSUE_TAGS, query, null, BucketOrder.count(false), maxNumberOfTags);
return EsUtils.termsToMap(terms);
}
public List<String> searchAuthors(IssueQuery query, @Nullable String textQuery, int maxNumberOfAuthors) {
- Terms terms = listTermsMatching(FIELD_ISSUE_AUTHOR_LOGIN, query, textQuery, Terms.Order.term(true), maxNumberOfAuthors);
+ Terms terms = listTermsMatching(FIELD_ISSUE_AUTHOR_LOGIN, query, textQuery, BucketOrder.key(true), maxNumberOfAuthors);
return EsUtils.termsKeys(terms);
}
- private Terms listTermsMatching(String fieldName, IssueQuery query, @Nullable String textQuery, Terms.Order termsOrder, int size) {
+ private Terms listTermsMatching(String fieldName, IssueQuery query, @Nullable String textQuery, BucketOrder termsOrder, int size) {
SearchRequestBuilder requestBuilder = client
.prepareSearch(TYPE_ISSUE.getMainType())
// Avoids returning search hits
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
-import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator.KeyedFilter;
+import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter;
import org.elasticsearch.search.aggregations.bucket.nested.Nested;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.sonar.api.server.ServerSide;
.field(FIELD_LANGUAGES)
.size(MAX_PAGE_SIZE)
.minDocCount(1)
- .order(Terms.Order.count(false)));
+ .order(BucketOrder.count(false)));
request.addAggregation(AggregationBuilders.nested(FIELD_NCLOC_LANGUAGE_DISTRIBUTION, FIELD_NCLOC_LANGUAGE_DISTRIBUTION)
.subAggregation(AggregationBuilders.terms(FIELD_NCLOC_LANGUAGE_DISTRIBUTION + "_terms")
.field(FIELD_DISTRIB_LANGUAGE)
.size(MAX_PAGE_SIZE)
.minDocCount(1)
- .order(Terms.Order.count(false))
+ .order(BucketOrder.count(false))
.subAggregation(sum(FIELD_DISTRIB_NCLOC).field(FIELD_DISTRIB_NCLOC))));
request.addAggregation(AggregationBuilders.nested(NCLOC_KEY, FIELD_MEASURES)
}
private static AbstractAggregationBuilder createQualityGateFacet() {
- return AggregationBuilders.filters(
+ return filters(
ALERT_STATUS_KEY,
QUALITY_GATE_STATUS.entrySet().stream()
.map(entry -> new KeyedFilter(entry.getKey(), termQuery(FIELD_QUALITY_GATE_STATUS, entry.getValue())))
.field(FIELD_TAGS)
.size(size)
.minDocCount(1)
- .order(Terms.Order.term(true));
+ .order(BucketOrder.key(true));
if (textQuery != null) {
tagFacet.includeExclude(new IncludeExclude(".*" + escapeSpecialRegexChars(textQuery) + ".*", null));
}
setAttribute(protobuf, "Store Size", byteCountToDisplaySize(stats.getIndices().getStore().getSizeInBytes()));
setAttribute(protobuf, "Open File Descriptors", stats.getProcess().getOpenFileDescriptors());
setAttribute(protobuf, "Max File Descriptors", stats.getProcess().getMaxFileDescriptors());
- setAttribute(protobuf, "Spinning", stats.getFs().getTotal().getSpins());
setAttribute(protobuf, "JVM Heap Usage", formatPercent(stats.getJvm().getMem().getHeapUsedPercent()));
setAttribute(protobuf, "JVM Heap Used", byteCountToDisplaySize(stats.getJvm().getMem().getHeapUsed().getBytes()));
setAttribute(protobuf, "JVM Heap Max", byteCountToDisplaySize(stats.getJvm().getMem().getHeapMax().getBytes()));
IssueQuery.Builder query = IssueQuery.builder();
// There are 12 issues in total; with 10 issues per page, page 2 should only contain 2 elements
SearchResponse result = underTest.search(query.build(), new SearchOptions().setPage(2, 10));
- assertThat(result.getHits().hits()).hasSize(2);
+ assertThat(result.getHits().getHits()).hasSize(2);
assertThat(result.getHits().getTotalHits()).isEqualTo(12);
result = underTest.search(IssueQuery.builder().build(), new SearchOptions().setOffset(0).setLimit(5));
- assertThat(result.getHits().hits()).hasSize(5);
+ assertThat(result.getHits().getHits()).hasSize(5);
assertThat(result.getHits().getTotalHits()).isEqualTo(12);
result = underTest.search(IssueQuery.builder().build(), new SearchOptions().setOffset(2).setLimit(0));
- assertThat(result.getHits().hits()).hasSize(10);
+ assertThat(result.getHits().getHits()).hasSize(10);
assertThat(result.getHits().getTotalHits()).isEqualTo(12);
}
IssueQuery.Builder query = IssueQuery.builder();
SearchResponse result = underTest.search(query.build(), new SearchOptions().setLimit(Integer.MAX_VALUE));
- assertThat(result.getHits().hits()).hasSize(SearchOptions.MAX_LIMIT);
+ assertThat(result.getHits().getHits()).hasSize(SearchOptions.MAX_LIMIT);
}
@Test
.filter(authorizationTypeSupport.createQueryFilter()))
.get()
.getHits();
- List<String> names = Arrays.stream(hits.hits())
- .map(h -> h.getSource().get(FooIndexDefinition.FIELD_NAME).toString())
+ List<String> names = Arrays.stream(hits.getHits())
+ .map(h -> h.getSourceAsMap().get(FooIndexDefinition.FIELD_NAME).toString())
.collect(MoreCollectors.toList());
return names.size() == 2 && names.contains("bar") && names.contains("baz");
}
assertThat(dbClient.userDao().selectByLogin(session, "user").getId()).isEqualTo(dto.getId());
List<SearchHit> indexUsers = es.getDocuments(UserIndexDefinition.TYPE_USER);
assertThat(indexUsers).hasSize(1);
- assertThat(indexUsers.get(0).getSource())
+ assertThat(indexUsers.get(0).getSourceAsMap())
.contains(
entry("login", "user"),
entry("name", "User"),
List<SearchHit> indexUsers = es.getDocuments(UserIndexDefinition.TYPE_USER);
assertThat(indexUsers).hasSize(1);
- assertThat(indexUsers.get(0).getSource())
+ assertThat(indexUsers.get(0).getSourceAsMap())
.contains(
entry("login", DEFAULT_LOGIN),
entry("name", "Marius2"),
List<SearchHit> indexUsers = es.getDocuments(UserIndexDefinition.TYPE_USER);
assertThat(indexUsers).hasSize(1);
- assertThat(indexUsers.get(0).getSource())
+ assertThat(indexUsers.get(0).getSourceAsMap())
.contains(entry("login", "new_login"));
}