
Improve caching when expanding nodes in hierarchical data (#8902)

Fixes #8790
tags/8.1.0.alpha3
Teemu Suo-Anttila, 7 years ago
Parent revision
0bee1dc5f8

+ 111
- 3
client/src/main/java/com/vaadin/client/data/AbstractRemoteDataSource.java

@@ -49,6 +49,9 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
* Callback used by
* {@link AbstractRemoteDataSource#requestRows(int, int, RequestRowsCallback)}
* to pass data to the underlying implementation when data has been fetched.
*
* @param <T>
* the row type
*/
public static class RequestRowsCallback<T> {
private final Range requestedRange;
@@ -56,7 +59,7 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
private final AbstractRemoteDataSource<T> source;

/**
* Creates a new callback
* Creates a new callback.
*
* @param source
* the data source for which the request is made
@@ -181,6 +184,24 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
private final HashMap<Integer, T> indexToRowMap = new HashMap<>();
private final HashMap<Object, Integer> keyToIndexMap = new HashMap<>();

/**
* Map used to temporarily store rows invalidated by
* {@link #insertRowData(int, int)}. All invalidated rows are stored with
* their indices matching the state after row insertion. If the backend has
* pre-emptively pushed the row data for the added rows, these rows will be
* used again to fill the cache.
* <p>
* To keep this invalidation bookkeeping from growing too large, this
* class does not attempt to track these rows indefinitely. Multiple row
* manipulations without the gaps being filled in between remove all
* invalidated rows and prevent any further restore attempts. This state
* is indicated by an empty map, not {@code null}.
* <p>
* The map is set to {@code null} upon requesting the rows from the backend
* to indicate that future row additions can attempt to restore the cache.
*/
private Map<Integer, T> invalidatedRows;

private Set<DataChangeHandler> dataChangeHandlers = new LinkedHashSet<>();

private CacheStrategy cacheStrategy = new CacheStrategy.DefaultCacheStrategy();
@@ -277,6 +298,9 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {

Profiler.enter("AbstractRemoteDataSource.checkCacheCoverage");

// Clean up invalidated data
invalidatedRows = null;

Range minCacheRange = getMinCacheRange();

if (!minCacheRange.intersects(cached) || cached.isEmpty()) {
@@ -496,6 +520,8 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
*/
if (!cached.isEmpty()) {
cached = cached.combineWith(newUsefulData);
// Attempt to restore invalidated items
fillCacheFromInvalidatedRows(maxCacheRange);
} else {
cached = newUsefulData;
}
@@ -505,6 +531,7 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
cached.length()));

updatePinnedRows(rowData);

}

if (!partition[0].isEmpty() || !partition[2].isEmpty()) {
@@ -533,6 +560,58 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {

}

/**
* Goes through the items invalidated by {@link #insertRowData(int, int)}.
* If the server has pre-emptively sent the added row data immediately
* after announcing the row addition, the invalidated cache can be
* restored to the proper index range.
*
* @param maxCacheRange
*            the maximum range of rows that can be cached
*/
private void fillCacheFromInvalidatedRows(Range maxCacheRange) {
if (invalidatedRows == null || invalidatedRows.isEmpty()) {
// No old invalid cache available
return;
}

Range potentialCache = maxCacheRange.partitionWith(cached)[2];
int start = potentialCache.getStart();
int last = start;
try {
if (potentialCache.isEmpty()
|| invalidatedRows.containsKey(start - 1)) {
// The cache is already full, or the invalidated rows contain
// unexpected indices.
return;
}

for (int i = start; i < potentialCache.getEnd(); ++i) {
if (!invalidatedRows.containsKey(i)) {
return;
}
T row = invalidatedRows.get(i);
indexToRowMap.put(i, row);
keyToIndexMap.put(getRowKey(row), i);
last = i;
}

// Cache filled from invalidated rows; we can continue as if it had
// never been invalidated.
invalidatedRows = null;
} finally {
// Update cache range and clean up
if (invalidatedRows != null) {
invalidatedRows.clear();
}

Range updated = Range.between(start, last + 1);
cached = cached.combineWith(updated);

dataChangeHandlers.forEach(dch -> dch
.dataUpdated(updated.getStart(), updated.length()));
}
}

private Stream<DataChangeHandler> getHandlers() {
Set<DataChangeHandler> copy = new LinkedHashSet<>(dataChangeHandlers);
return copy.stream();
@@ -560,6 +639,12 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
protected void removeRowData(int firstRowIndex, int count) {
Profiler.enter("AbstractRemoteDataSource.removeRowData");

// The cache has not been filled since the previous insertRowData
// call, so the old rows are no longer useful.
if (invalidatedRows != null) {
invalidatedRows.clear();
}

size -= count;

Range removedRange = Range.withLength(firstRowIndex, count);
@@ -604,6 +689,12 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
protected void insertRowData(int firstRowIndex, int count) {
Profiler.enter("AbstractRemoteDataSource.insertRowData");

// The cache has not been filled since the previous insertRowData
// call, so the old rows are no longer useful.
if (invalidatedRows != null) {
invalidatedRows.clear();
}

size += count;

if (firstRowIndex <= cached.getStart()) {
@@ -625,7 +716,24 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
* If holes were supported, we could shift the higher part of
* "cached" and leave a hole the size of "count" in the middle.
*/
cached = cached.splitAt(firstRowIndex)[0];
Range[] splitAt = cached.splitAt(firstRowIndex);
cached = splitAt[0];
Range invalid = splitAt[1];

/*
* If invalidatedRows already holds a map, multiple row manipulations
* have happened without any data being received in between, and cache
* restoration is prevented completely.
*/
if (!invalid.isEmpty() && invalidatedRows == null) {
invalidatedRows = new HashMap<>();
// Store all invalidated items in the map, with indices updated to
// match their positions after the insertion.
for (int i = invalid.getStart(); i < invalid.getEnd(); ++i) {
invalidatedRows.put(i + count, indexToRowMap.get(i));
}
}

for (int i = firstRowIndex; i < oldCacheEnd; i++) {
T row = indexToRowMap.remove(Integer.valueOf(i));
@@ -657,7 +765,7 @@ public abstract class AbstractRemoteDataSource<T> implements DataSource<T> {
}

/**
* Gets the current range of cached rows
* Gets the current range of cached rows.
*
* @return the range of currently cached rows
*/
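
The mechanism introduced above is easiest to follow end to end. Below is a minimal, self-contained model of the same idea, not the actual Vaadin implementation: it uses String rows, assumes the cache always starts at row 0 and that pushed rows arrive exactly at the cache's end, and leaves out the real class's pinning and Range arithmetic. All names in the sketch are made up.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Illustrative model of the invalidated-rows bookkeeping sketched above. */
class RowCacheSketch {
    private final Map<Integer, String> indexToRow = new HashMap<>();
    private int cachedEnd = 0; // rows [0, cachedEnd) are cached
    // null means a later insertion may start tracking invalidated rows again
    private Map<Integer, String> invalidatedRows;

    /** Rows were inserted; cached rows from firstRowIndex on are displaced. */
    void insertRows(int firstRowIndex, int count) {
        boolean trackable = invalidatedRows == null;
        if (!trackable) {
            // Second manipulation without data in between: give up restoring.
            invalidatedRows.clear();
        }
        if (firstRowIndex < cachedEnd) {
            if (trackable) {
                invalidatedRows = new HashMap<>();
            }
            for (int i = firstRowIndex; i < cachedEnd; i++) {
                String displaced = indexToRow.remove(i);
                if (trackable) {
                    // Stash the displaced row under its post-insertion index.
                    invalidatedRows.put(i + count, displaced);
                }
            }
            cachedEnd = firstRowIndex;
        }
    }

    /** The backend pushed the rows for the gap at cachedEnd: fill the cache,
     *  then restore the displaced tail without a new request. */
    void receiveRows(List<String> rows) {
        for (String row : rows) {
            indexToRow.put(cachedEnd++, row);
        }
        if (invalidatedRows != null) {
            // Restore only while the indices stay contiguous with the cache.
            while (invalidatedRows.containsKey(cachedEnd)) {
                indexToRow.put(cachedEnd, invalidatedRows.get(cachedEnd));
                cachedEnd++;
            }
            invalidatedRows = null; // restored, or nothing left to restore
        }
    }
}

In this model, expanding a node maps to insertRows(expandedRowIndex + 1, childCount) followed by receiveRows(children): the displaced tail comes back from invalidatedRows instead of triggering another round trip, which is exactly what the server-side change below enables.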

+ 33
- 28
server/src/main/java/com/vaadin/data/provider/HierarchicalDataCommunicator.java

@@ -145,38 +145,41 @@ public class HierarchicalDataCommunicator<T> extends DataCommunicator<T> {
private void loadRequestedRows() {
final Range requestedRows = getPushRows();
if (!requestedRows.isEmpty()) {
Stream<TreeLevelQuery> levelQueries = mapper
.splitRangeToLevelQueries(requestedRows.getStart(),
requestedRows.getEnd() - 1);

JsonObject[] dataObjects = new JsonObject[requestedRows.length()];
BiConsumer<JsonObject, Integer> rowDataMapper = (object,
index) -> dataObjects[index
- requestedRows.getStart()] = object;
List<T> fetchedItems = new ArrayList<>(dataObjects.length);

levelQueries.forEach(query -> {
List<T> results = doFetchQuery(query.startIndex, query.size,
getKeyMapper().get(query.node.getParentKey()))
.collect(Collectors.toList());
// TODO if the size differs from expected, all goes to hell
fetchedItems.addAll(results);
List<JsonObject> rowData = results.stream()
.map(item -> createDataObject(item, query.depth))
.collect(Collectors.toList());
mapper.reorderLevelQueryResultsToFlatOrdering(rowDataMapper,
query, rowData);
});
verifyNoNullItems(dataObjects, requestedRows);

sendData(requestedRows.getStart(), Arrays.asList(dataObjects));
getActiveDataHandler().addActiveData(fetchedItems.stream());
getActiveDataHandler().cleanUp(fetchedItems.stream());
doPushRows(requestedRows);
}

setPushRows(Range.withLength(0, 0));
}

private void doPushRows(final Range requestedRows) {
Stream<TreeLevelQuery> levelQueries = mapper.splitRangeToLevelQueries(
requestedRows.getStart(), requestedRows.getEnd() - 1);

JsonObject[] dataObjects = new JsonObject[requestedRows.length()];
BiConsumer<JsonObject, Integer> rowDataMapper = (object,
index) -> dataObjects[index
- requestedRows.getStart()] = object;
List<T> fetchedItems = new ArrayList<>(dataObjects.length);

levelQueries.forEach(query -> {
List<T> results = doFetchQuery(query.startIndex, query.size,
getKeyMapper().get(query.node.getParentKey()))
.collect(Collectors.toList());
// TODO if the size differs from expected, all goes to hell
fetchedItems.addAll(results);
List<JsonObject> rowData = results.stream()
.map(item -> createDataObject(item, query.depth))
.collect(Collectors.toList());
mapper.reorderLevelQueryResultsToFlatOrdering(rowDataMapper, query,
rowData);
});
verifyNoNullItems(dataObjects, requestedRows);

sendData(requestedRows.getStart(), Arrays.asList(dataObjects));
getActiveDataHandler().addActiveData(fetchedItems.stream());
getActiveDataHandler().cleanUp(fetchedItems.stream());
}

/*
* Verify that there are no null objects in the array, to fail eagerly and
* not just on the client side.
@@ -387,8 +390,10 @@ public class HierarchicalDataCommunicator<T> extends DataCommunicator<T> {

mapper.expand(expandedRowKey, expandedRowIndex, expandedNodeSize);

// TODO optimize by sending "enough" of the expanded items directly
getClientRpc().insertRows(expandedRowIndex + 1, expandedNodeSize);
// TODO optimize by sending "just enough" of the expanded items directly
doPushRows(Range.withLength(expandedRowIndex + 1, expandedNodeSize));

// expanded node needs to be updated to be marked as expanded
// FIXME seems like a slight overkill to do this just for refreshing
// expanded status
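
The server-side counterpart of the client change above: expanding a node now pushes the child rows in the same round trip as the insertRows call, so the client can fill the gap and restore its invalidated tail without issuing a new data request. A hedged sketch of the resulting client-bound sequence (the onExpand helper and message strings are hypothetical; the real calls are getClientRpc().insertRows(...) and doPushRows(...) as shown in the diff):

import java.util.ArrayList;
import java.util.List;

/** Hypothetical illustration of the client-bound sequence on expand. */
public class ExpandSequenceSketch {

    /** Messages sent when the node at rowIndex gains childCount child rows. */
    static List<String> onExpand(int rowIndex, int childCount) {
        List<String> toClient = new ArrayList<>();
        // 1. Announce the insertion. On the client, insertRowData(...) shifts
        //    indices and stashes the displaced cached rows in invalidatedRows.
        toClient.add("insertRows(" + (rowIndex + 1) + ", " + childCount + ")");
        // 2. Push the data for exactly that range. The client fills the gap,
        //    restores the stashed tail, and needs no follow-up request, which
        //    is what TreeGridExpandDataRequestTest below verifies.
        toClient.add("pushRows(" + (rowIndex + 1) + ", " + childCount + ")");
        return toClient;
    }

    public static void main(String[] args) {
        // Expanding row 0 with 3 children prints:
        //   insertRows(1, 3)
        //   pushRows(1, 3)
        onExpand(0, 3).forEach(System.out::println);
    }
}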

+ 27
- 0
uitest/src/main/java/com/vaadin/tests/components/treegrid/TreeGridBasicFeatures.java

@@ -3,13 +3,18 @@ package com.vaadin.tests.components.treegrid;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;

import com.vaadin.annotations.Theme;
import com.vaadin.annotations.Widgetset;
import com.vaadin.data.HierarchyData;
import com.vaadin.data.provider.DataProvider;
import com.vaadin.data.provider.HierarchicalDataProvider;
import com.vaadin.data.provider.HierarchicalQuery;
import com.vaadin.data.provider.InMemoryHierarchicalDataProvider;
import com.vaadin.server.SerializablePredicate;
import com.vaadin.shared.Range;
import com.vaadin.tests.components.AbstractComponentTest;
import com.vaadin.ui.TreeGrid;

@@ -20,6 +25,7 @@ public class TreeGridBasicFeatures extends AbstractComponentTest<TreeGrid> {
private TreeGrid<HierarchicalTestBean> grid;
private InMemoryHierarchicalDataProvider<HierarchicalTestBean> inMemoryDataProvider;
private LazyHierarchicalDataProvider lazyDataProvider;
private HierarchicalDataProvider<HierarchicalTestBean, ?> loggingDataProvider;

@Override
public TreeGrid getComponent() {
@@ -82,6 +88,26 @@ public class TreeGridBasicFeatures extends AbstractComponentTest<TreeGrid> {

inMemoryDataProvider = new InMemoryHierarchicalDataProvider<>(data);
lazyDataProvider = new LazyHierarchicalDataProvider(3, 2);
loggingDataProvider = new InMemoryHierarchicalDataProvider<HierarchicalTestBean>(
data) {

@Override
public Stream<HierarchicalTestBean> fetchChildren(
HierarchicalQuery<HierarchicalTestBean, SerializablePredicate<HierarchicalTestBean>> query) {
Optional<HierarchicalTestBean> parentOptional = query
.getParentOptional();
if (parentOptional.isPresent()) {
log("Children request: " + parentOptional.get() + " ; "
+ Range.withLength(query.getOffset(),
query.getLimit()));
} else {
log("Root node request: " + Range
.withLength(query.getOffset(), query.getLimit()));
}
return super.fetchChildren(query);
}
};

}

@SuppressWarnings("unchecked")
@@ -90,6 +116,7 @@ public class TreeGridBasicFeatures extends AbstractComponentTest<TreeGrid> {
LinkedHashMap<String, DataProvider> options = new LinkedHashMap<>();
options.put("LazyHierarchicalDataProvider", lazyDataProvider);
options.put("InMemoryHierarchicalDataProvider", inMemoryDataProvider);
options.put("LoggingDataProvider", loggingDataProvider);

createSelectAction("Set data provider", CATEGORY_FEATURES, options,
"LazyHierarchicalDataProvider",

+ 52
- 0
uitest/src/test/java/com/vaadin/tests/components/treegrid/TreeGridExpandDataRequestTest.java

@@ -0,0 +1,52 @@
package com.vaadin.tests.components.treegrid;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import com.vaadin.testbench.elements.TreeGridElement;
import com.vaadin.tests.tb3.SingleBrowserTest;

public class TreeGridExpandDataRequestTest extends SingleBrowserTest {

@Override
protected Class<?> getUIClass() {
return TreeGridBasicFeatures.class;
}

TreeGridElement grid;

@Before
public void before() {
openTestURL();
grid = $(TreeGridElement.class).first();
selectMenuPath("Component", "Features", "Set data provider",
"LoggingDataProvider");
clearLog();
}

private void clearLog() {
selectMenuPath("Settings", "Clear log");
}

@Test
public void expand_node0_does_not_request_root_nodes() {
grid.expandWithClick(0);
Assert.assertFalse("Log should not contain request for root nodes.",
logContainsText("Root node request: "));
}

@Test
public void expand_node0_after_node1_does_not_request_children_of_node1() {
grid.expandWithClick(1);
Assert.assertFalse("Log should not contain request for root nodes.",
logContainsText("Root node request: "));
clearLog();
grid.expandWithClick(0);
Assert.assertFalse(
"Log should not contain request for children of '0 | 1'.",
logContainsText("Children request: 0 | 1"));
Assert.assertFalse("Log should not contain request for root nodes.",
logContainsText("Root node request: "));
}
}
