@@ -64,7 +64,7 @@ public class LoadChangedIssuesStep implements ComputationStep { | |||
@Override | |||
public String getDescription() { | |||
return "Load changed issues for indexation"; | |||
return "Load changed issues for indexing"; | |||
} | |||
} |
@@ -48,7 +48,7 @@ public final class IgnoreOrphanBranchStep implements ComputationStep { | |||
Optional<ComponentDto> componentDto = dbClient.componentDao().selectByUuid(dbSession, componentUuid); | |||
Optional<EntityDto> entityDto = dbClient.entityDao().selectByUuid(dbSession, entityUuid); | |||
if (componentDto.isEmpty() || entityDto.isEmpty()) { | |||
LOG.info("reindexation task has been trigger on an orphan branch. removing any exclude_from_purge flag, and skip the indexation"); | |||
LOG.info("reindexing task has been triggered on an orphan branch. removing any exclude_from_purge flag, and skipping the indexing"); | |||
dbClient.branchDao().updateExcludeFromPurge(dbSession, componentUuid, false); | |||
dbClient.branchDao().updateNeedIssueSync(dbSession, componentUuid, false); | |||
dbSession.commit(); |
@@ -64,7 +64,7 @@ public class LoadChangedIssuesStepTest { | |||
@Test | |||
public void getDescription_shouldReturnDescription() { | |||
assertThat(underTest.getDescription()).isEqualTo("Load changed issues for indexation"); | |||
assertThat(underTest.getDescription()).isEqualTo("Load changed issues for indexing"); | |||
} | |||
@Test |
@@ -40,7 +40,7 @@ public interface InternalCeQueue extends CeQueue { | |||
* The task status is changed to {@link org.sonar.db.ce.CeQueueDto.Status#IN_PROGRESS}. | |||
* Does not return anything if workers are paused or being paused (see {@link #getWorkersPauseStatus()}). | |||
* | |||
* @param excludeIndexationJob change the underlying request to exclude indexation tasks. | |||
* @param excludeIndexationJob change the underlying request to exclude indexing tasks. | |||
* | |||
* <p>Only a single task can be peeked by project.</p> | |||
* |
@@ -54,8 +54,8 @@ public class NextPendingTaskPicker { | |||
} | |||
Optional<CeQueueDto> findPendingTask(String workerUuid, DbSession dbSession, boolean prioritizeAnalysisAndRefresh) { | |||
// try to find tasks including indexation job & excluding app/portfolio and if no match, try the opposite | |||
// when prioritizeAnalysisAndRefresh is false, search first excluding indexation jobs and including app/portfolio, then the opposite | |||
// try to find tasks including indexing job & excluding app/portfolio and if no match, try the opposite | |||
// when prioritizeAnalysisAndRefresh is false, search first excluding indexing jobs and including app/portfolio, then the opposite | |||
Optional<CeTaskDtoLight> eligibleForPeek = ceQueueDao.selectEligibleForPeek(dbSession, prioritizeAnalysisAndRefresh, !prioritizeAnalysisAndRefresh); | |||
Optional<CeTaskDtoLight> eligibleForPeekInParallel = eligibleForPeekInParallel(dbSession); | |||
@@ -230,7 +230,7 @@ public class IssueIndexerIT { | |||
String projectUuid = issue.getProjectUuid(); | |||
assertThatThrownBy(() -> underTest.indexOnAnalysis(projectUuid)) | |||
.isInstanceOf(IllegalStateException.class) | |||
.hasMessage("Unrecoverable indexation failures: 1 errors among 1 requests. Check Elasticsearch logs for further details."); | |||
.hasMessage("Unrecoverable indexing failures: 1 errors among 1 requests. Check Elasticsearch logs for further details."); | |||
assertThatIndexHasSize(0); | |||
assertThatEsQueueTableHasSize(0); | |||
es.unlockWrites(TYPE_ISSUE); | |||
@@ -478,7 +478,7 @@ public class IssueIndexerIT { | |||
List<String> issues = List.of("Issue1"); | |||
assertThatThrownBy(() -> underTest.deleteByKeys("P1", issues)) | |||
.isInstanceOf(IllegalStateException.class) | |||
.hasMessage("Unrecoverable indexation failures: 1 errors among 1 requests. Check Elasticsearch logs for further details."); | |||
.hasMessage("Unrecoverable indexing failures: 1 errors among 1 requests. Check Elasticsearch logs for further details."); | |||
assertThatIndexHasOnly("Issue1"); | |||
assertThatEsQueueTableHasSize(0); | |||
es.unlockWrites(TYPE_ISSUE); |
@@ -42,22 +42,22 @@ public interface Indexers { | |||
} | |||
/** | |||
* Re-index data based on the event. It commits the DB session once any indexation request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexation event should | |||
* Re-index data based on the event. It commits the DB session once any indexing request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexing event should | |||
* be done using the same DB session and the session should be uncommitted. | |||
*/ | |||
void commitAndIndexOnEntityEvent(DbSession dbSession, Collection<String> entityUuids, EntityEvent cause); | |||
/** | |||
* Re-index data based on the event. It commits the DB session once any indexation request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexation event should | |||
* Re-index data based on the event. It commits the DB session once any indexing request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexing event should | |||
* be done using the same DB session and the session should be uncommitted. | |||
*/ | |||
void commitAndIndexOnBranchEvent(DbSession dbSession, Collection<String> branchUuids, BranchEvent cause); | |||
/** | |||
* Re-index data based on the event. It commits the DB session once any indexation request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexation event should | |||
* Re-index data based on the event. It commits the DB session once any indexing request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexing event should | |||
* be done using the same DB session and the session should be uncommitted. | |||
*/ | |||
default void commitAndIndexEntities(DbSession dbSession, Collection<? extends EntityDto> entities, EntityEvent cause) { | |||
@@ -68,8 +68,8 @@ public interface Indexers { | |||
} | |||
/** | |||
* Re-index data based on the event. It commits the DB session once any indexation request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexation event should | |||
* Re-index data based on the event. It commits the DB session once any indexing request was written in the same session, | |||
* ensuring consistency between the DB and the indexes. Therefore, DB data changes that cause the indexing event should | |||
* be done using the same DB session and the session should be uncommitted. | |||
*/ | |||
default void commitAndIndexBranches(DbSession dbSession, Collection<BranchDto> branches, BranchEvent cause) { |
@@ -41,11 +41,11 @@ public class IndexersImpl implements Indexers { | |||
} | |||
/** | |||
* Asks all indexers to queue an indexation request in the DB to index the specified entities, if needed (according to | |||
* Asks all indexers to queue an indexing request in the DB to index the specified entities, if needed (according to | |||
* "cause" parameter), then call all indexers to index the requests. | |||
* The idea is that the indexation requests are committed into the DB at the same time as the data that caused those requests | |||
* The idea is that the indexing requests are committed into the DB at the same time as the data that caused those requests | |||
* to be created, for consistency. | |||
* If the indexation fails, the indexation requests will still be in the DB and can be processed again later. | |||
* If the indexing fails, the indexing requests will still be in the DB and can be processed again later. | |||
*/ | |||
@Override | |||
public void commitAndIndexOnEntityEvent(DbSession dbSession, Collection<String> entityUuids, EntityEvent cause) { | |||
@@ -53,11 +53,11 @@ public class IndexersImpl implements Indexers { | |||
} | |||
/** | |||
* Asks all indexers to queue an indexation request in the DB to index the specified branches, if needed (according to | |||
* Asks all indexers to queue an indexing request in the DB to index the specified branches, if needed (according to | |||
* "cause" parameter), then call all indexers to index the requests. | |||
* The idea is that the indexation requests are committed into the DB at the same time as the data that caused those requests | |||
* The idea is that the indexing requests are committed into the DB at the same time as the data that caused those requests | |||
* to be created, for consistency. | |||
* If the indexation fails, the indexation requests will still be in the DB and can be processed again later. | |||
* If the indexing fails, the indexing requests will still be in the DB and can be processed again later. | |||
*/ | |||
@Override | |||
public void commitAndIndexOnBranchEvent(DbSession dbSession, Collection<String> branchUuids, BranchEvent cause) { |
@@ -39,7 +39,7 @@ public interface IndexingListener { | |||
public void onFinish(IndexingResult result) { | |||
if (result.getFailures() > 0) { | |||
throw new IllegalStateException( | |||
format("Unrecoverable indexation failures: %d errors among %d requests. Check Elasticsearch logs for further details.", | |||
format("Unrecoverable indexing failures: %d errors among %d requests. Check Elasticsearch logs for further details.", | |||
result.getFailures(), | |||
result.getTotal())); | |||
} |
@@ -24,12 +24,12 @@ import org.sonar.db.DbSession; | |||
import org.sonar.db.es.EsQueueDto; | |||
/** | |||
* Indexers that are resilient. These indexers handle indexation items that are queued in the DB. | |||
* Indexers that are resilient. These indexers handle indexing items that are queued in the DB. | |||
*/ | |||
public interface ResilientIndexer extends StartupIndexer { | |||
/** | |||
* Index the items and delete them from es_queue DB table when the indexation | |||
* Index the items and delete them from es_queue DB table when the indexing | |||
* is done. If there is a failure, the items are kept in DB to be re-processed later. | |||
* | |||
* @param dbSession the db session |
@@ -35,7 +35,7 @@ public class FailOnErrorIndexingListenerTest { | |||
assertThatThrownBy(() -> FAIL_ON_ERROR.onFinish(indexingResult)) | |||
.isInstanceOf(IllegalStateException.class) | |||
.hasMessage("Unrecoverable indexation failures: 1 errors among 1 requests. " | |||
.hasMessage("Unrecoverable indexing failures: 1 errors among 1 requests. " | |||
+ "Check Elasticsearch logs for further details."); | |||
} | |||
@@ -97,7 +97,7 @@ public class PermissionIndexerTest { | |||
indexOnStartup(); | |||
assertThat(es.countDocuments(INDEX_TYPE_FOO_AUTH)).isEqualTo(2); | |||
// Simulate a indexation issue | |||
// Simulate an indexing issue | |||
db.getDbClient().purgeDao().deleteProject(db.getSession(), project1.getUuid(), PROJECT, project1.getName(), project1.getKey()); | |||
underTest.prepareForRecoveryOnEntityEvent(db.getSession(), asList(project1.getUuid()), EntityEvent.DELETION); | |||
assertThat(db.countRowsOfTable(db.getSession(), "es_queue")).isOne(); |
@@ -355,7 +355,7 @@ describe('issues app when reindexing', () => { | |||
expect(ui.issueStatusFacet.query()).not.toBeInTheDocument(); | |||
expect(ui.tagFacet.query()).not.toBeInTheDocument(); | |||
// Indexation message | |||
// Indexing message | |||
expect(screen.getByText(/indexation\.filters_unavailable/)).toBeInTheDocument(); | |||
}); | |||
}); |
@@ -63,7 +63,7 @@ public class AsyncIssueIndexingImpl implements AsyncIssueIndexing { | |||
try (DbSession dbSession = dbClient.openSession(false)) { | |||
// remove already existing indexation task, if any | |||
// remove already existing indexing task, if any | |||
removeExistingIndexationTasks(dbSession); | |||
dbClient.branchDao().updateAllNeedIssueSync(dbSession); | |||
@@ -99,7 +99,7 @@ public class AsyncIssueIndexingImpl implements AsyncIssueIndexing { | |||
public void triggerForProject(String projectUuid) { | |||
try (DbSession dbSession = dbClient.openSession(false)) { | |||
// remove already existing indexation task, if any | |||
// remove already existing indexing task, if any | |||
removeExistingIndexationTasksForProject(dbSession, projectUuid); | |||
dbClient.branchDao().updateAllNeedIssueSyncForProject(dbSession, projectUuid); | |||
@@ -162,14 +162,18 @@ public class AsyncIssueIndexingImpl implements AsyncIssueIndexing { | |||
} | |||
private void removeIndexationTasks(DbSession dbSession, Set<String> ceQueueUuids, Set<String> ceActivityUuids) { | |||
LOG.info(String.format("%s pending indexation task found to be deleted...", ceQueueUuids.size())); | |||
LOG.atInfo().setMessage("{} pending indexing task found to be deleted...") | |||
.addArgument(ceQueueUuids.size()) | |||
.log(); | |||
for (String uuid : ceQueueUuids) { | |||
dbClient.ceQueueDao().deleteByUuid(dbSession, uuid); | |||
} | |||
LOG.info(String.format("%s completed indexation task found to be deleted...", ceQueueUuids.size())); | |||
LOG.atInfo().setMessage("{} completed indexing task found to be deleted...") | |||
.addArgument(ceActivityUuids.size()) | |||
.log(); | |||
dbClient.ceActivityDao().deleteByUuids(dbSession, ceActivityUuids); | |||
LOG.info("Indexation task deletion complete."); | |||
LOG.info("Indexing task deletion complete."); | |||
LOG.info("Deleting tasks characteristics..."); | |||
Set<String> tasksUuid = Stream.concat(ceQueueUuids.stream(), ceActivityUuids.stream()).collect(Collectors.toSet()); |
@@ -159,9 +159,9 @@ public class AsyncIssueIndexingImplTest { | |||
assertThat(logTester.logs(Level.INFO)) | |||
.contains( | |||
"1 pending indexation task found to be deleted...", | |||
"1 completed indexation task found to be deleted...", | |||
"Indexation task deletion complete.", | |||
"1 pending indexing task found to be deleted...", | |||
"1 completed indexing task found to be deleted...", | |||
"Indexing task deletion complete.", | |||
"Deleting tasks characteristics...", | |||
"Tasks characteristics deletion complete."); | |||
} | |||
@@ -197,7 +197,7 @@ public class AsyncIssueIndexingImplTest { | |||
assertThat(dbClient.ceActivityDao().selectByTaskType(dbTester.getSession(), REPORT)).hasSize(1); | |||
assertThat(dbClient.ceTaskCharacteristicsDao().selectByTaskUuids(dbTester.getSession(), new HashSet<>(List.of("uuid_2")))).isEmpty(); | |||
// verify that the canceled tasks on anotherProject is still here, and was not removed by the project reindexation | |||
// verify that the canceled task on anotherProject is still there, and was not removed by the project reindexing | |||
assertThat(dbClient.ceActivityDao().selectByTaskType(dbTester.getSession(), BRANCH_ISSUE_SYNC)) | |||
.hasSize(1) | |||
.extracting(CeActivityDto::getEntityUuid) | |||
@@ -205,9 +205,9 @@ public class AsyncIssueIndexingImplTest { | |||
assertThat(logTester.logs(Level.INFO)) | |||
.contains( | |||
"2 pending indexation task found to be deleted...", | |||
"2 completed indexation task found to be deleted...", | |||
"Indexation task deletion complete.", | |||
"2 pending indexing task found to be deleted...", | |||
"2 completed indexing task found to be deleted...", | |||
"Indexing task deletion complete.", | |||
"Deleting tasks characteristics...", | |||
"Tasks characteristics deletion complete.", | |||
"Tasks characteristics deletion complete.", |
@@ -43,7 +43,7 @@ public class IndexationStatusAction implements CeWsAction { | |||
@Override | |||
public void define(WebService.NewController controller) { | |||
controller.createAction("indexation_status") | |||
.setDescription("Returns the count of projects with completed issue indexation.") | |||
.setDescription("Returns the count of projects with completed issue indexing.") | |||
.setResponseExample(getClass().getResource("indexation_status-example.json")) | |||
.setChangelog(new Change("10.2", "Project count is returned instead of branch percentage.")) | |||
.setHandler(this) |
@@ -90,7 +90,7 @@ public class SearchEventsAction implements DevelopersWsAction { | |||
WebService.NewAction action = controller.createAction("search_events") | |||
.setDescription("Search for events.<br/>" + | |||
"Requires authentication." | |||
+ "<br/>When issue indexation is in progress returns 503 service unavailable HTTP code.") | |||
+ "<br/>When issue indexing is in progress returns 503 service unavailable HTTP code.") | |||
.setSince("1.0") | |||
.setInternal(true) | |||
.setHandler(this) |
@@ -97,7 +97,7 @@ public class ListAction implements HotspotsWsAction { | |||
.createAction("list") | |||
.setHandler(this) | |||
.setInternal(true) | |||
.setDescription("List Security Hotpots. This endpoint is used in degraded mode, when issue indexation is running." + | |||
.setDescription("List Security Hotspots. This endpoint is used in degraded mode, when issue indexing is running." + | |||
"<br>Total number of Security Hotspots will be always equal to a page size, as counting all issues is not supported. " + | |||
"<br>Requires the 'Browse' permission on the specified project. ") | |||
.setSince("10.2"); |
@@ -222,7 +222,7 @@ public class SearchAction implements HotspotsWsAction { | |||
.setDescription("Search for Security Hotspots. <br>" | |||
+ "Requires the 'Browse' permission on the specified project(s). <br>" | |||
+ "For applications, it also requires 'Browse' permission on its child projects. <br>" | |||
+ "When issue indexation is in progress returns 503 service unavailable HTTP code.") | |||
+ "When issue indexing is in progress returns 503 service unavailable HTTP code.") | |||
.setSince("8.1") | |||
.setChangelog( | |||
new Change("10.2", format("Parameter '%s' renamed to '%s'", PARAM_PROJECT_KEY, PARAM_PROJECT)), |
@@ -72,7 +72,7 @@ public class AuthorsAction implements IssuesWsAction { | |||
.setSince("5.1") | |||
.setDescription("Search SCM accounts which match a given query.<br/>" + | |||
"Requires authentication." | |||
+ "<br/>When issue indexation is in progress returns 503 service unavailable HTTP code.") | |||
+ "<br/>When issue indexing is in progress returns 503 service unavailable HTTP code.") | |||
.setResponseExample(Resources.getResource(this.getClass(), "authors-example.json")) | |||
.setChangelog(new Change("7.4", "The maximum size of 'ps' is set to 100")) | |||
.setHandler(this); |
@@ -69,7 +69,7 @@ public class ComponentTagsAction implements IssuesWsAction { | |||
.setSince("5.1") | |||
.setInternal(true) | |||
.setDescription("List tags for the issues under a given component (including issues on the descendants of the component)" | |||
+ "<br/>When issue indexation is in progress returns 503 service unavailable HTTP code.") | |||
+ "<br/>When issue indexing is in progress returns 503 service unavailable HTTP code.") | |||
.setResponseExample(Resources.getResource(getClass(), "component-tags-example.json")); | |||
action.createParam(PARAM_COMPONENT_UUID) |
@@ -91,7 +91,7 @@ public class ListAction implements IssuesWsAction { | |||
.createAction(ACTION_LIST) | |||
.setHandler(this) | |||
.setInternal(true) | |||
.setDescription("List issues. This endpoint is used in degraded mode, when issue indexation is running." + | |||
.setDescription("List issues. This endpoint is used in degraded mode, when issue indexing is running." + | |||
"<br>Either 'project' or 'component' parameter is required." + | |||
"<br>Total number of issues will always be equal to a page size, as counting all issues is not supported. " + | |||
"<br>Requires the 'Browse' permission on the specified project. ") |
@@ -209,7 +209,7 @@ public class SearchAction implements IssuesWsAction { | |||
.setHandler(this) | |||
.setDescription("Search for issues.<br>Requires the 'Browse' permission on the specified project(s). <br>" | |||
+ "For applications, it also requires 'Browse' permission on its child projects." | |||
+ "<br/>When issue indexation is in progress returns 503 service unavailable HTTP code.") | |||
+ "<br/>When issue indexing is in progress returns 503 service unavailable HTTP code.") | |||
.setSince("3.6") | |||
.setChangelog( | |||
new Change("10.4", "Added new param '%s'".formatted(PARAM_FIXED_IN_PULL_REQUEST)), |
@@ -650,7 +650,7 @@ project_branch_pull_request.branch.delete=Delete branch | |||
project_branch_pull_request.branch.actions_label=Update {0} | |||
project_branch_pull_request.branch.delete.are_you_sure=Are you sure you want to delete branch "{name}"? | |||
project_branch_pull_request.branch.main_branch.are_you_sure=Are you sure you want to set branch "{branch}" as the main branch of this project? | |||
project_branch_pull_request.branch.main_branch.requires_reindex=Changing the main branch of your project will trigger a project re-indexation and may impact the level of information that is available until re-indexing is complete. | |||
project_branch_pull_request.branch.main_branch.requires_reindex=Changing the main branch of your project will trigger a project re-indexing and may impact the level of information that is available until re-indexing is complete. | |||
project_branch_pull_request.branch.main_branch.learn_more=Please refer to the {documentation} to understand the impacts of changing the main branch. | |||
project_branch_pull_request.branch.auto_deletion.keep_when_inactive=Keep when inactive | |||
project_branch_pull_request.branch.auto_deletion.keep_when_inactive.tooltip=When turned on, the branch will not be automatically deleted when inactive. | |||
@@ -5126,7 +5126,7 @@ maintenance.sonarqube_is_offline.text=The connection to SonarQube is lost. Pleas | |||
#------------------------------------------------------------------------------ | |||
# | |||
# INDEXATION | |||
# INDEXING | |||
# | |||
#------------------------------------------------------------------------------ | |||
indexation.in_progress=Reindexing in progress. |
@@ -84,7 +84,7 @@ public class ProjectFileIndexer { | |||
} | |||
public void index() { | |||
progressReport = new ProgressReport("Report about progress of file indexation", TimeUnit.SECONDS.toMillis(10)); | |||
progressReport = new ProgressReport("Report about progress of file indexing", TimeUnit.SECONDS.toMillis(10)); | |||
progressReport.start("Indexing files..."); | |||
LOG.info("Project configuration:"); | |||
projectExclusionFilters.log(" "); |