DataCommunicator.java
  1. /*
  2. * Copyright 2000-2016 Vaadin Ltd.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License"); you may not
  5. * use this file except in compliance with the License. You may obtain a copy of
  6. * the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. * License for the specific language governing permissions and limitations under
  14. * the License.
  15. */
  16. package com.vaadin.server.data;
  17. import java.io.Serializable;
  18. import java.util.ArrayList;
  19. import java.util.Collection;
  20. import java.util.Collections;
  21. import java.util.Comparator;
  22. import java.util.HashSet;
  23. import java.util.LinkedHashSet;
  24. import java.util.List;
  25. import java.util.Objects;
  26. import java.util.Set;
  27. import java.util.function.Predicate;
  28. import java.util.stream.Collectors;
  29. import java.util.stream.Stream;
  30. import com.vaadin.server.AbstractExtension;
  31. import com.vaadin.server.KeyMapper;
  32. import com.vaadin.shared.Range;
  33. import com.vaadin.shared.data.DataCommunicatorClientRpc;
  34. import com.vaadin.shared.data.DataCommunicatorConstants;
  35. import com.vaadin.shared.data.DataRequestRpc;
  36. import elemental.json.Json;
  37. import elemental.json.JsonArray;
  38. import elemental.json.JsonObject;
/**
 * DataCommunicator base class. This class is the base for all DataProvider
 * communication implementations. It uses {@link DataGenerator}s to write
 * {@link JsonObject}s representing each data object to be sent to the
 * client-side.
 *
 * @since
 */
  47. public class DataCommunicator<T> extends AbstractExtension {
  48. /**
  49. * Simple implementation of collection data provider communication. All data
  50. * is sent by server automatically and no data is requested by client.
  51. */
  52. protected class SimpleDataRequestRpc implements DataRequestRpc {
  53. @Override
  54. public void requestRows(int firstRowIndex, int numberOfRows,
  55. int firstCachedRowIndex, int cacheSize) {
  56. pushRows = Range.withLength(firstRowIndex, numberOfRows);
  57. markAsDirty();
  58. }
  59. @Override
  60. public void dropRows(JsonArray keys) {
  61. for (int i = 0; i < keys.length(); ++i) {
  62. handler.dropActiveData(keys.getString(i));
  63. }
  64. }
  65. }
  66. /**
  67. * A class for handling currently active data and dropping data that is no
  68. * longer needed. Data tracking is based on key string provided by
  69. * {@link DataKeyMapper}.
  70. * <p>
  71. * When the {@link DataCommunicator} is pushing new data to the client-side
  72. * via {@link DataCommunicator#pushData(long, Collection)},
  73. * {@link #addActiveData(Collection)} and {@link #cleanUp(Collection)} are
  74. * called with the same parameter. In the clean up method any dropped data
  75. * objects that are not in the given collection will be cleaned up and
  76. * {@link DataGenerator#destroyData(Object)} will be called for them.
  77. */
  78. protected class ActiveDataHandler
  79. implements Serializable, DataGenerator<T> {
  80. /**
  81. * Set of key strings for currently active data objects
  82. */
  83. private final Set<String> activeData = new HashSet<>();
  84. /**
  85. * Set of key strings for data objects dropped on the client. This set
  86. * is used to clean up old data when it's no longer needed.
  87. */
  88. private final Set<String> droppedData = new HashSet<>();
  89. /**
  90. * Adds given objects as currently active objects.
  91. *
  92. * @param dataObjects
  93. * collection of new active data objects
  94. */
  95. public void addActiveData(Stream<T> dataObjects) {
  96. dataObjects.map(getKeyMapper()::key)
  97. .filter(key -> !activeData.contains(key))
  98. .forEach(activeData::add);
  99. }
  100. /**
  101. * Executes the data destruction for dropped data that is not sent to
  102. * the client. This method takes most recently sent data objects in a
  103. * collection. Doing the clean up like this prevents the
  104. * {@link ActiveDataHandler} from creating new keys for rows that were
  105. * dropped but got re-requested by the client-side. In the case of
  106. * having all data at the client, the collection should be all the data
  107. * in the back end.
  108. *
  109. * @param dataObjects
  110. * collection of most recently sent data to the client
  111. */
  112. public void cleanUp(Stream<T> dataObjects) {
  113. Collection<String> keys = dataObjects.map(getKeyMapper()::key)
  114. .collect(Collectors.toSet());
  115. // Remove still active rows that were dropped by the client
  116. droppedData.removeAll(keys);
  117. // Do data clean up for object no longer needed.
  118. dropData(droppedData);
  119. droppedData.clear();
  120. }
  121. /**
  122. * Marks a data object identified by given key string to be dropped.
  123. *
  124. * @param key
  125. * key string
  126. */
  127. public void dropActiveData(String key) {
  128. if (activeData.contains(key)) {
  129. droppedData.add(key);
  130. }
  131. }
  132. /**
  133. * Returns the collection of all currently active data.
  134. *
  135. * @return collection of active data objects
  136. */
  137. public Collection<T> getActiveData() {
  138. HashSet<T> hashSet = new HashSet<>();
  139. for (String key : activeData) {
  140. hashSet.add(getKeyMapper().get(key));
  141. }
  142. return hashSet;
  143. }
  144. @Override
  145. public void generateData(T data, JsonObject jsonObject) {
  146. // Write the key string for given data object
  147. jsonObject.put(DataCommunicatorConstants.KEY,
  148. getKeyMapper().key(data));
  149. }
  150. @Override
  151. public void destroyData(T data) {
  152. // Remove from active data set
  153. activeData.remove(getKeyMapper().key(data));
  154. // Drop the registered key
  155. getKeyMapper().remove(data);
  156. }
  157. }
    // Generators applied to each row when building its JSON representation;
    // LinkedHashSet preserves the order in which generators were added.
    private Collection<DataGenerator<T>> generators = new LinkedHashSet<>();
    // Tracks active/dropped row keys; also registered as a data generator in
    // the constructor so every row gets its key written.
    private ActiveDataHandler handler = new ActiveDataHandler();
    // Current data source; null until setDataSource is called.
    private DataSource<T> dataSource;
    // Maps data objects to the key strings sent to the client; created in the
    // constructor via createKeyMapper().
    private DataKeyMapper<T> keyMapper;
    // When true, a full reset is sent on the next beforeClientResponse.
    private boolean reset = false;
    // Data objects whose refreshed state is pushed on the next response.
    private final Set<T> updatedData = new HashSet<>();
    // Row range to push on the next response; initially the first 40 rows.
    private Range pushRows = Range.withLength(0, 40);
    // Sorting and filtering applied only when the data source is in-memory.
    private Comparator<T> inMemorySorting;
    private Predicate<T> inMemoryFilter;
    // Sort order information passed to back-end queries.
    private List<SortOrder<String>> backEndSorting = new ArrayList<>();
    // Client RPC proxy used to send rows and resets to the client-side.
    private DataCommunicatorClientRpc rpc;
  169. public DataCommunicator() {
  170. addDataGenerator(handler);
  171. rpc = getRpcProxy(DataCommunicatorClientRpc.class);
  172. registerRpc(createRpc());
  173. keyMapper = createKeyMapper();
  174. }
    /**
     * Initially and in the case of a reset all data should be pushed to the
     * client.
     */
    @Override
    public void beforeClientResponse(boolean initial) {
        super.beforeClientResponse(initial);
        if (getDataSource() == null) {
            // Nothing to communicate before a data source is set.
            return;
        }
        // FIXME: Sorting and Filtering with Backend
        Set<Object> filters = Collections.emptySet();
        if (initial || reset) {
            int dataSourceSize;
            if (getDataSource().isInMemory() && inMemoryFilter != null) {
                // Count only rows passing the in-memory filter so the size
                // reported to the client matches what is actually pushed.
                dataSourceSize = (int) getDataSource().apply(new Query())
                        .filter(inMemoryFilter).count();
            } else {
                dataSourceSize = getDataSource().size(new Query(filters));
            }
            rpc.reset(dataSourceSize);
        }
        if (!pushRows.isEmpty()) {
            int offset = pushRows.getStart();
            int limit = pushRows.length();
            Stream<T> rowsToPush;
            if (getDataSource().isInMemory()) {
                // We can safely request all the data when in memory
                rowsToPush = getDataSource().apply(new Query());
                if (inMemoryFilter != null) {
                    rowsToPush = rowsToPush.filter(inMemoryFilter);
                }
                if (inMemorySorting != null) {
                    rowsToPush = rowsToPush.sorted(inMemorySorting);
                }
                // Filter and sort the full data set first, then cut out the
                // requested window.
                rowsToPush = rowsToPush.skip(offset).limit(limit);
            } else {
                // Back-end source: push paging/sorting/filtering into the
                // query instead of doing it in memory.
                Query query = new Query(offset, limit, backEndSorting, filters);
                rowsToPush = getDataSource().apply(query);
            }
            pushData(offset, rowsToPush);
        }
        if (!updatedData.isEmpty()) {
            // Re-send data for rows marked updated via refresh(T).
            JsonArray dataArray = Json.createArray();
            int i = 0;
            for (T data : updatedData) {
                dataArray.set(i++, getDataObject(data));
            }
            rpc.updateData(dataArray);
        }
        // All pending changes are flushed; clear state for the next response.
        pushRows = Range.withLength(0, 0);
        reset = false;
        updatedData.clear();
    }
  229. /**
  230. * Adds a data generator to this data communicator. Data generators can be
  231. * used to insert custom data to the rows sent to the client. If the data
  232. * generator is already added, does nothing.
  233. *
  234. * @param generator
  235. * the data generator to add, not null
  236. */
  237. public void addDataGenerator(DataGenerator<T> generator) {
  238. Objects.requireNonNull(generator, "generator cannot be null");
  239. generators.add(generator);
  240. }
  241. /**
  242. * Removes a data generator from this data communicator. If there is no such
  243. * data generator, does nothing.
  244. *
  245. * @param generator
  246. * the data generator to remove, not null
  247. */
  248. public void removeDataGenerator(DataGenerator<T> generator) {
  249. Objects.requireNonNull(generator, "generator cannot be null");
  250. generators.remove(generator);
  251. }
    /**
     * Gets the {@link DataKeyMapper} used by this {@link DataCommunicator}. Key
     * mapper can be used to map keys sent to the client-side back to their
     * respective data objects.
     * <p>
     * The mapper is created in the constructor via {@link #createKeyMapper()}.
     *
     * @return key mapper
     */
    public DataKeyMapper<T> getKeyMapper() {
        return keyMapper;
    }
  262. /**
  263. * Sends given collection of data objects to the client-side.
  264. *
  265. * @param firstIndex
  266. * first index of pushed data
  267. * @param data
  268. * data objects to send as an iterable
  269. */
  270. protected void pushData(int firstIndex, Stream<T> data) {
  271. JsonArray dataArray = Json.createArray();
  272. int i = 0;
  273. List<T> collected = data.collect(Collectors.toList());
  274. for (T item : collected) {
  275. dataArray.set(i++, getDataObject(item));
  276. }
  277. rpc.setData(firstIndex, dataArray);
  278. handler.addActiveData(collected.stream());
  279. handler.cleanUp(collected.stream());
  280. }
  281. /**
  282. * Creates the JsonObject for given data object. This method calls all data
  283. * generators for it.
  284. *
  285. * @param data
  286. * data object to be made into a json object
  287. * @return json object representing the data object
  288. */
  289. protected JsonObject getDataObject(T data) {
  290. JsonObject dataObject = Json.createObject();
  291. for (DataGenerator<T> generator : generators) {
  292. generator.generateData(data, dataObject);
  293. }
  294. return dataObject;
  295. }
  296. /**
  297. * Drops data objects identified by given keys from memory. This will invoke
  298. * {@link DataGenerator#destroyData} for each of those objects.
  299. *
  300. * @param droppedKeys
  301. * collection of dropped keys
  302. */
  303. private void dropData(Collection<String> droppedKeys) {
  304. for (String key : droppedKeys) {
  305. assert key != null : "Bookkeepping failure. Dropping a null key";
  306. T data = getKeyMapper().get(key);
  307. assert data != null : "Bookkeepping failure. No data object to match key";
  308. for (DataGenerator<T> g : generators) {
  309. g.destroyData(data);
  310. }
  311. }
  312. }
  313. /**
  314. * Informs the DataProvider that the collection has changed.
  315. */
  316. protected void reset() {
  317. if (reset) {
  318. return;
  319. }
  320. reset = true;
  321. markAsDirty();
  322. }
  323. /**
  324. * Informs the DataProvider that a data object has been updated.
  325. *
  326. * @param data
  327. * updated data object
  328. */
  329. public void refresh(T data) {
  330. if (updatedData.isEmpty()) {
  331. markAsDirty();
  332. }
  333. updatedData.add(data);
  334. }
    /**
     * Sets the {@link Predicate} to use with in-memory filtering.
     * <p>
     * A null predicate disables in-memory filtering (see
     * {@code beforeClientResponse}, which only applies a non-null filter).
     *
     * @param predicate
     *            predicate used to filter data
     */
    public void setInMemoryFilter(Predicate<T> predicate) {
        inMemoryFilter = predicate;
        // Filtering changes the visible row set; push everything again.
        reset();
    }
    /**
     * Sets the {@link Comparator} to use with in-memory sorting.
     * <p>
     * A null comparator disables in-memory sorting (see
     * {@code beforeClientResponse}, which only applies a non-null comparator).
     *
     * @param comparator
     *            comparator used to sort data
     */
    public void setInMemorySorting(Comparator<T> comparator) {
        inMemorySorting = comparator;
        // Ordering changed; push everything again.
        reset();
    }
  355. /**
  356. * Sets the {@link SortOrder}s to use with backend sorting.
  357. *
  358. * @param sortOrder
  359. * list of sort order information to pass to a query
  360. */
  361. public void setBackEndSorting(List<SortOrder<String>> sortOrder) {
  362. backEndSorting.clear();
  363. backEndSorting.addAll(sortOrder);
  364. reset();
  365. }
    /**
     * Creates a {@link DataKeyMapper} to use with this DataCommunicator.
     * <p>
     * This method is called from the constructor; override it to supply a
     * custom key mapper implementation.
     *
     * @return key mapper
     */
    protected DataKeyMapper<T> createKeyMapper() {
        return new KeyMapper<>();
    }
    /**
     * Creates a {@link DataRequestRpc} used with this {@link DataCommunicator}.
     * <p>
     * This method is called from the constructor; override it to supply a
     * custom request RPC implementation.
     *
     * @return data request rpc implementation
     */
    protected DataRequestRpc createRpc() {
        return new SimpleDataRequestRpc();
    }
    /**
     * Gets the current data source from this DataCommunicator.
     *
     * @return the data source, or null if none has been set
     */
    public DataSource<T> getDataSource() {
        return dataSource;
    }
  394. /**
  395. * Sets the current data source for this DataCommunicator.
  396. *
  397. * @param dataSource
  398. * the data source to set, not null
  399. */
  400. public void setDataSource(DataSource<T> dataSource) {
  401. Objects.requireNonNull(dataSource, "data source cannot be null");
  402. this.dataSource = dataSource;
  403. reset();
  404. }
  405. }