001package com.avaje.ebean;
002
003import com.avaje.ebeanservice.docstore.api.DocQueryRequest;
004import org.jetbrains.annotations.Nullable;
005
006import java.io.IOException;
007import java.util.List;
008import java.util.Map;
009
/**
 * Document storage operations.
 * <p>
 * Provides index maintenance (index by query, index all, drop/create/copy index,
 * index settings) and doc-store query execution (find, findList, findPagedList,
 * findEach, findEachWhile) against the associated document store
 * (for example, ElasticSearch).
 * </p>
 */
public interface DocumentStore {

  /**
   * Update the associated document store using the result of the query.
   * <p>
   * This will execute the query against the database creating a document for each
   * bean graph and sending this to the document store.
   * </p>
   * <p>
   * Note that the select and fetch paths of the query are set for you to match the
   * document structure needed based on <code>@DocStore</code> and <code>@DocStoreEmbedded</code>
   * so what this query requires is the predicates only.
   * </p>
   * <p>
   * This query will be executed using findEach so it is safe to use a query
   * that will fetch a lot of beans. The default bulkBatchSize is used.
   * </p>
   *
   * @param query The query that selects objects to send to the document store.
   */
  <T> void indexByQuery(Query<T> query);

  /**
   * Update the associated document store index using the result of the query additionally specifying a
   * bulkBatchSize to use for sending the messages to ElasticSearch.
   *
   * @param query         The query that selects objects to send to the document store.
   * @param bulkBatchSize The batch size to use when bulk sending to the document store.
   */
  <T> void indexByQuery(Query<T> query, int bulkBatchSize);

  /**
   * Update the document store for all beans of this type.
   * <p>
   * This is the same as indexByQuery where the query has no predicates and so fetches all rows.
   * </p>
   *
   * @param beanType The bean type whose documents are all (re)indexed.
   */
  void indexAll(Class<?> beanType);

  /**
   * Return the bean by fetching its content from the document store.
   * If the document is not found null is returned.
   * <p>
   * Typically this is called indirectly by findUnique() on the query.
   * </p>
   * <pre>{@code
   *
   * Customer customer =
   *   server.find(Customer.class)
   *     .setUseDocStore(true)
   *     .setId(42)
   *     .findUnique();
   *
   * }</pre>
   *
   * @param request The doc store query request to execute.
   * @return The matching bean or null if the document is not found.
   */
  @Nullable
  <T> T find(DocQueryRequest<T> request);

  /**
   * Execute the find list query. This request is prepared to execute secondary queries.
   * <p>
   * Typically this is called indirectly by findList() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   * List<Customer> newCustomers =
   *  server.find(Customer.class)
   *    .setUseDocStore(true)
   *    .where().eq("status", Customer.Status.NEW)
   *    .findList();
   *
   * }</pre>
   *
   * @param request The doc store query request to execute.
   * @return The list of matching beans (empty when there are no matches).
   */
  <T> List<T> findList(DocQueryRequest<T> request);

  /**
   * Execute the query against the document store returning the paged list.
   * <p>
   * The query should have <code>firstRow</code> or <code>maxRows</code> set prior to calling this method.
   * </p>
   * <p>
   * Typically this is called indirectly by findPagedList() on the query that has setUseDocStore(true).
   * </p>
   *
   * <pre>{@code
   *
   * PagedList<Customer> newCustomers =
   *  server.find(Customer.class)
   *    .setUseDocStore(true)
   *    .where().eq("status", Customer.Status.NEW)
   *    .setMaxRows(50)
   *    .findPagedList();
   *
   * }</pre>
   *
   * @param request The doc store query request to execute.
   * @return The page of matching beans.
   */
  <T> PagedList<T> findPagedList(DocQueryRequest<T> request);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEach() on the query that has setUseDocStore(true).
   * </p>
   *
   * <pre>{@code
   *
   *  server.find(Order.class)
   *    .setUseDocStore(true)
   *    .where()... // perhaps add predicates
   *    .findEach(new QueryEachConsumer<Order>() {
   *      @Override
   *      public void accept(Order bean) {
   *        // process the bean
   *      }
   *    });
   *
   * }</pre>
   *
   * @param query    The doc store query request to execute.
   * @param consumer The consumer invoked for each bean in the result.
   */
  <T> void findEach(DocQueryRequest<T> query, QueryEachConsumer<T> consumer);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * Unlike findEach() this provides the opportunity to stop iterating through the large query.
   * </p>
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEachWhile() on the query that has setUseDocStore(true).
   * </p>
   *
   *
   * <pre>{@code
   *
   *  server.find(Order.class)
   *    .setUseDocStore(true)
   *    .where()... // perhaps add predicates
   *    .findEachWhile(new QueryEachWhileConsumer<Order>() {
   *      @Override
   *      public void accept(Order bean) {
   *        // process the bean
   *
   *        // return true to continue, false to stop
   *        // boolean shouldContinue = ...
   *        return shouldContinue;
   *      }
   *    });
   *
   * }</pre>
   *
   * @param query    The doc store query request to execute.
   * @param consumer The consumer invoked for each bean; returns true to continue, false to stop iterating.
   */
  <T> void findEachWhile(DocQueryRequest<T> query, QueryEachWhileConsumer<T> consumer);

  /**
   * Process the queue entries sending updates to the document store or queuing them for later processing.
   *
   * @param queueEntries The queue entries to process.
   * @return The number of entries processed.
   * @throws IOException When communication with the document store fails.
   */
  long process(List<DocStoreQueueEntry> queueEntries) throws IOException;

  /**
   * Drop the index from the document store (similar to DDL drop table).
   *
   * <pre>{@code
   *
   *   DocumentStore documentStore = server.docStore();
   *
   *   documentStore.dropIndex("product_copy");
   *
   * }</pre>
   *
   * @param indexName the name of the index to drop
   */
  void dropIndex(String indexName);

  /**
   * Create an index given a mapping file as a resource in the classPath (similar to DDL create table).
   *
   * <pre>{@code
   *
   *   DocumentStore documentStore = server.docStore();
   *
   *   // uses product_copy.mapping.json resource
   *   // ... to define mappings for the index
   *
   *   documentStore.createIndex("product_copy", null);
   *
   * }</pre>
   *
   * @param indexName       the name of the new index
   * @param alias           the alias of the index (may be null as in the example above)
   */
  void createIndex(String indexName, String alias);

  /**
   * Modify the settings on an index.
   * <p>
   * For example, this can be used to set elasticSearch refresh_interval
   * on an index before a bulk update.
   * </p>
   * <pre>{@code
   *
   *   // refresh_interval -1 ... disable refresh while bulk loading
   *
   *   Map<String,Object> settings = new LinkedHashMap<>();
   *   settings.put("refresh_interval", "-1");
   *
   *   documentStore.indexSettings("product", settings);
   *
   * }</pre>
   *
   * <pre>{@code
   *
   *   // refresh_interval 1s ... restore after bulk loading
   *
   *   Map<String,Object> settings = new LinkedHashMap<>();
   *   settings.put("refresh_interval", "1s");
   *
   *   documentStore.indexSettings("product", settings);
   *
   * }</pre>
   *
   * @param indexName the name of the index to update settings on
   * @param settings  the settings to set on the index
   */
  void indexSettings(String indexName, Map<String, Object> settings);

  /**
   * Copy the index to a new index.
   * <p>
   * This copy process does not use the database but instead will copy from the source index to a destination index.
   * </p>
   *
   * <pre>{@code
   *
   *  long copyCount = documentStore.copyIndex(Product.class, "product_copy");
   *
   * }</pre>
   *
   * @param beanType The bean type of the source index
   * @param newIndex The name of the index to copy to
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex);

  /**
   * Copy entries from an index to a new index but limiting to documents that have been
   * modified since the sinceEpochMillis time.
   * <p>
   * To support this the document needs to have a <code>@WhenModified</code> property.
   * </p>
   *
   * <pre>{@code
   *
   *  long copyCount = documentStore.copyIndex(Product.class, "product_copy", sinceMillis);
   *
   * }</pre>
   *
   * @param beanType         The bean type of the source index
   * @param newIndex         The name of the index to copy to
   * @param sinceEpochMillis Only documents modified since this epoch millis time are copied
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex, long sinceEpochMillis);

  /**
   * Copy from a source index to a new index taking only the documents
   * matching the given query.
   *
   * <pre>{@code
   *
   *  // predicates to select the source documents to copy
   *  Query<Product> query = server.find(Product.class)
   *    .where()
   *      .ge("whenModified", new Timestamp(since))
   *      .ge("name", "A")
   *      .lt("name", "D")
   *      .query();
   *
   *  // copy from the source index to "product_copy" index
   *  long copyCount = documentStore.copyIndex(query, "product_copy", 1000);
   *
   * }</pre>
   *
   * @param query         The query to select the source documents to copy
   * @param newIndex      The target index to copy the documents to
   * @param bulkBatchSize The ElasticSearch bulk batch size, if 0 uses the default.
   * @return The number of documents copied to the new index.
   */
  long copyIndex(Query<?> query, String newIndex, int bulkBatchSize);
}