/*-
 * #%L
 * HAPI FHIR JPA Server
 * %%
 * Copyright (C) 2014 - 2024 Smile CDR, Inc.
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package ca.uhn.fhir.jpa.util;

import ca.uhn.fhir.jpa.search.builder.SearchBuilder;
import ca.uhn.fhir.util.TaskChunker;

import java.util.Collection;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Stream;

/**
 * Oracle, unlike most other databases, can't handle more than ~1000 IDs in a single
 * query, so when loading a large number of IDs we break them up into smaller chunks.
 * Arguably this should be done as a join instead, but chunking works as well.
 * See {@link #chunk(Collection, Consumer)} below for a usage sketch.
 */
public class QueryChunker<T> extends TaskChunker<T> {

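	/**
	 * Breaks {@code theInput} into batches of at most {@link SearchBuilder#getMaximumPageSize()}
	 * entries and passes each batch to {@code theBatchConsumer}.
	 *
	 * <p>Illustrative usage sketch; {@code loadResourcePids()} and {@code runInClauseQuery(List)}
	 * are hypothetical caller-side helpers, not part of this class:</p>
	 * <pre>{@code
	 * List<Long> pids = loadResourcePids();
	 * QueryChunker.chunk(pids, batch -> runInClauseQuery(batch));
	 * }</pre>
	 */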
	public static <T> void chunk(Collection<T> theInput, Consumer<List<T>> theBatchConsumer) {
		chunk(theInput, SearchBuilder.getMaximumPageSize(), theBatchConsumer);
	}

	/**
	 * Splits {@code theStream} into a stream of lists, each containing at most
	 * {@link SearchBuilder#getMaximumPageSize()} elements.
	 */
	public static <T> Stream<List<T>> chunk(Stream<T> theStream) {
		return chunk(theStream, SearchBuilder.getMaximumPageSize());
	}
}