author     Teemu Suo-Anttila <tsuoanttila@users.noreply.github.com>   2017-03-24 13:04:09 +0200
committer  Henri Sara <henri.sara@gmail.com>                          2017-03-24 13:04:09 +0200
commit     0bee1dc5f8bb5314b3b71fd077709dd4f2701742 (patch)
tree       8b7194d343ddbd760106186aeb13758e5c6663fb /server/src
parent     e905e2bb8057d19128bc5bd052d73ee8f29687a8 (diff)
Improve caching when expanding nodes in hierarchical data (#8902)
Fixes #8790
Diffstat (limited to 'server/src')
-rw-r--r--  server/src/main/java/com/vaadin/data/provider/HierarchicalDataCommunicator.java | 61
1 file changed, 33 insertions(+), 28 deletions(-)
diff --git a/server/src/main/java/com/vaadin/data/provider/HierarchicalDataCommunicator.java b/server/src/main/java/com/vaadin/data/provider/HierarchicalDataCommunicator.java
index 5bb1335ed0..b93d0c3952 100644
--- a/server/src/main/java/com/vaadin/data/provider/HierarchicalDataCommunicator.java
+++ b/server/src/main/java/com/vaadin/data/provider/HierarchicalDataCommunicator.java
@@ -145,38 +145,41 @@ public class HierarchicalDataCommunicator<T> extends DataCommunicator<T> {
     private void loadRequestedRows() {
         final Range requestedRows = getPushRows();
         if (!requestedRows.isEmpty()) {
-            Stream<TreeLevelQuery> levelQueries = mapper
-                    .splitRangeToLevelQueries(requestedRows.getStart(),
-                            requestedRows.getEnd() - 1);
-
-            JsonObject[] dataObjects = new JsonObject[requestedRows.length()];
-            BiConsumer<JsonObject, Integer> rowDataMapper = (object,
-                    index) -> dataObjects[index
-                            - requestedRows.getStart()] = object;
-            List<T> fetchedItems = new ArrayList<>(dataObjects.length);
-
-            levelQueries.forEach(query -> {
-                List<T> results = doFetchQuery(query.startIndex, query.size,
-                        getKeyMapper().get(query.node.getParentKey()))
-                                .collect(Collectors.toList());
-                // TODO if the size differers from expected, all goes to hell
-                fetchedItems.addAll(results);
-                List<JsonObject> rowData = results.stream()
-                        .map(item -> createDataObject(item, query.depth))
-                        .collect(Collectors.toList());
-                mapper.reorderLevelQueryResultsToFlatOrdering(rowDataMapper,
-                        query, rowData);
-            });
-            verifyNoNullItems(dataObjects, requestedRows);
-
-            sendData(requestedRows.getStart(), Arrays.asList(dataObjects));
-            getActiveDataHandler().addActiveData(fetchedItems.stream());
-            getActiveDataHandler().cleanUp(fetchedItems.stream());
+            doPushRows(requestedRows);
         }
         setPushRows(Range.withLength(0, 0));
     }
 
+    private void doPushRows(final Range requestedRows) {
+        Stream<TreeLevelQuery> levelQueries = mapper.splitRangeToLevelQueries(
+                requestedRows.getStart(), requestedRows.getEnd() - 1);
+
+        JsonObject[] dataObjects = new JsonObject[requestedRows.length()];
+        BiConsumer<JsonObject, Integer> rowDataMapper = (object,
+                index) -> dataObjects[index
+                        - requestedRows.getStart()] = object;
+        List<T> fetchedItems = new ArrayList<>(dataObjects.length);
+
+        levelQueries.forEach(query -> {
+            List<T> results = doFetchQuery(query.startIndex, query.size,
+                    getKeyMapper().get(query.node.getParentKey()))
+                            .collect(Collectors.toList());
+            // TODO if the size differers from expected, all goes to hell
+            fetchedItems.addAll(results);
+            List<JsonObject> rowData = results.stream()
+                    .map(item -> createDataObject(item, query.depth))
+                    .collect(Collectors.toList());
+            mapper.reorderLevelQueryResultsToFlatOrdering(rowDataMapper, query,
+                    rowData);
+        });
+        verifyNoNullItems(dataObjects, requestedRows);
+
+        sendData(requestedRows.getStart(), Arrays.asList(dataObjects));
+        getActiveDataHandler().addActiveData(fetchedItems.stream());
+        getActiveDataHandler().cleanUp(fetchedItems.stream());
+    }
+
     /*
      * Verify that there are no null objects in the array, to fail eagerly and
      * not just on the client side.
      */
@@ -387,8 +390,10 @@ public class HierarchicalDataCommunicator<T> extends DataCommunicator<T> {
         mapper.expand(expandedRowKey, expandedRowIndex, expandedNodeSize);
 
-        // TODO optimize by sending "enough" of the expanded items directly
         getClientRpc().insertRows(expandedRowIndex + 1, expandedNodeSize);
+        // TODO optimize by sending "just enough" of the expanded items directly
+        doPushRows(Range.withLength(expandedRowIndex + 1, expandedNodeSize));
+
         // expanded node needs to be updated to be marked as expanded
         // FIXME seems like a slight overkill to do this just for refreshing
         // expanded status
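
Note: below is a minimal, hypothetical sketch of server-side code that exercises this path, written against the TreeGrid / TreeData / TreeDataProvider names of the released Vaadin 8.1 API (those class names are assumptions here and are not part of this change). Calling expand() on the server makes HierarchicalDataCommunicator insert the child rows and, with this patch, immediately push their data via doPushRows instead of waiting for the client to request the missing cache range.

    import com.vaadin.data.TreeData;
    import com.vaadin.data.provider.TreeDataProvider;
    import com.vaadin.ui.TreeGrid;

    public class TreeGridExpandExample {
        public TreeGrid<String> createGrid() {
            // Small in-memory hierarchy: two roots, two children under "Projects".
            TreeData<String> data = new TreeData<>();
            data.addItems(null, "Projects", "Archive");
            data.addItems("Projects", "framework", "spring");

            TreeGrid<String> grid = new TreeGrid<>();
            grid.setDataProvider(new TreeDataProvider<>(data));
            grid.addColumn(item -> item).setCaption("Name");

            // Server-side expand: HierarchicalDataCommunicator inserts the child
            // rows and, with this change, pushes their data in the same round trip.
            grid.expand("Projects");
            return grid;
        }
    }

Extracting doPushRows is what lets the expand path reuse the exact same fetch-and-cache logic (doFetchQuery, sendData, addActiveData) that the scroll/push path already used, which is where the caching improvement comes from.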