concatMapSingleDelayError(@NonNull Function<? super T, ? extends SingleSource<? extends R>> mapper,
+ boolean tillTheEnd, int prefetch) {
Objects.requireNonNull(mapper, "mapper is null");
ObjectHelper.verifyPositive(prefetch, "prefetch");
return RxJavaPlugins.onAssembly(new FlowableConcatMapSingle<>(this, mapper, tillTheEnd ? ErrorMode.END : ErrorMode.BOUNDARY, prefetch));
@@ -11075,7 +11084,7 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
* Note that the {@code GroupedFlowable}s should be subscribed to as soon as possible, otherwise,
* the unconsumed groups may starve other groups due to the internal backpressure
* coordination of the {@code groupBy} operator. Such hangs can be usually avoided by using
- * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
+ * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
* value to be greater or equal to the expected number of groups, possibly using
* {@link Integer#MAX_VALUE} if the number of expected groups is unknown.
*
@@ -11133,7 +11142,7 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
* Note that the {@code GroupedFlowable}s should be subscribed to as soon as possible, otherwise,
* the unconsumed groups may starve other groups due to the internal backpressure
* coordination of the {@code groupBy} operator. Such hangs can be usually avoided by using
- * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
+ * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
* value to be greater or equal to the expected number of groups, possibly using
* {@link Integer#MAX_VALUE} if the number of expected groups is unknown.
*
@@ -11192,7 +11201,7 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
* Note that the {@code GroupedFlowable}s should be subscribed to as soon as possible, otherwise,
* the unconsumed groups may starve other groups due to the internal backpressure
* coordination of the {@code groupBy} operator. Such hangs can be usually avoided by using
- * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
+ * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
* value to be greater or equal to the expected number of groups, possibly using
* {@link Integer#MAX_VALUE} if the number of expected groups is unknown.
*
@@ -11256,7 +11265,7 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
* Note that the {@code GroupedFlowable}s should be subscribed to as soon as possible, otherwise,
* the unconsumed groups may starve other groups due to the internal backpressure
* coordination of the {@code groupBy} operator. Such hangs can be usually avoided by using
- * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
+ * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
* value to be greater or equal to the expected number of groups, possibly using
* {@link Integer#MAX_VALUE} if the number of expected groups is unknown.
*
@@ -11321,7 +11330,7 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
* Note that the {@code GroupedFlowable}s should be subscribed to as soon as possible, otherwise,
* the unconsumed groups may starve other groups due to the internal backpressure
* coordination of the {@code groupBy} operator. Such hangs can be usually avoided by using
- * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
+ * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
* value to be greater or equal to the expected number of groups, possibly using
* {@link Integer#MAX_VALUE} if the number of expected groups is unknown.
*
@@ -11394,7 +11403,9 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
*
* <p>
* The map created by an {@code evictingMapFactory} must be thread-safe.
*
- * <p>An example of an {@code evictingMapFactory} using CacheBuilder from the Guava library is below:
+ * <p>An example of an {@code evictingMapFactory} using
+ * <a href="https://google.github.io/guava/releases/24.0-jre/api/docs/com/google/common/cache/CacheBuilder.html">CacheBuilder</a>
+ * from the Guava library is below:
*
*
* Function<Consumer<Object>, Map<Integer, Object>> evictingMapFactory =
@@ -11434,7 +11445,7 @@ public final Disposable forEachWhile(@NonNull Predicate super T> onNext, @NonN
* Note that the {@code GroupedFlowable}s should be subscribed to as soon as possible, otherwise,
* the unconsumed groups may starve other groups due to the internal backpressure
* coordination of the {@code groupBy} operator. Such hangs can be usually avoided by using
- * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
+ * {@link #flatMap(Function, int)} or {@link #concatMapEager(Function, int, int)} and overriding the default maximum concurrency
* value to be greater or equal to the expected number of groups, possibly using
* {@link Integer#MAX_VALUE} if the number of expected groups is unknown.
*
@@ -12605,7 +12616,8 @@ public final Flowable onBackpressureBuffer(long capacity, @Nullable Action on
@BackpressureSupport(BackpressureKind.SPECIAL)
@SchedulerSupport(SchedulerSupport.NONE)
@Experimental
- public final Flowable<T> onBackpressureBuffer(long capacity, @Nullable Action onOverflow, @NonNull BackpressureOverflowStrategy overflowStrategy, @NonNull Consumer<? super T> onDropped) {
+ public final Flowable<T> onBackpressureBuffer(long capacity, @Nullable Action onOverflow,
+ @NonNull BackpressureOverflowStrategy overflowStrategy, @NonNull Consumer<? super T> onDropped) {
Objects.requireNonNull(overflowStrategy, "overflowStrategy is null");
Objects.requireNonNull(onDropped, "onDropped is null");
ObjectHelper.verifyPositive(capacity, "capacity");
@@ -12738,7 +12750,7 @@ public final Flowable onBackpressureLatest() {
@SchedulerSupport(SchedulerSupport.NONE)
@NonNull
@Experimental
- public final Flowable<T> onBackpressureLatest(@NonNull Consumer<? super T> onDropped) {
+ public final Flowable<T> onBackpressureLatest(@NonNull Consumer<? super T> onDropped) {
Objects.requireNonNull(onDropped, "onDropped is null");
return RxJavaPlugins.onAssembly(new FlowableOnBackpressureLatest<>(this, onDropped));
}
@@ -13769,7 +13781,8 @@ public final ConnectableFlowable replay() {
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.COMPUTATION)
@NonNull
- public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector, int bufferSize, long time, @NonNull TimeUnit unit) {
+ public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector,
+ int bufferSize, long time, @NonNull TimeUnit unit) {
return replay(selector, bufferSize, time, unit, Schedulers.computation());
}
@@ -13815,7 +13828,8 @@ public final ConnectableFlowable replay() {
@NonNull
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.CUSTOM)
- public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector, int bufferSize, long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
+ public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector,
+ int bufferSize, long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
Objects.requireNonNull(selector, "selector is null");
Objects.requireNonNull(unit, "unit is null");
ObjectHelper.verifyPositive(bufferSize, "bufferSize");
@@ -13868,7 +13882,8 @@ public final ConnectableFlowable replay() {
@NonNull
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.CUSTOM)
- public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector, int bufferSize, long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler, boolean eagerTruncate) {
+ public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector,
+ int bufferSize, long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler, boolean eagerTruncate) {
Objects.requireNonNull(selector, "selector is null");
Objects.requireNonNull(unit, "unit is null");
ObjectHelper.verifyPositive(bufferSize, "bufferSize");
@@ -13948,7 +13963,8 @@ public final ConnectableFlowable replay() {
@NonNull
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.CUSTOM)
- public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector, long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
+ public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector,
+ long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler) {
Objects.requireNonNull(selector, "selector is null");
Objects.requireNonNull(unit, "unit is null");
Objects.requireNonNull(scheduler, "scheduler is null");
@@ -13992,7 +14008,8 @@ public final ConnectableFlowable replay() {
@NonNull
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.CUSTOM)
- public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector, long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler, boolean eagerTruncate) {
+ public final <@NonNull R> Flowable<R> replay(@NonNull Function<? super Flowable<T>, @NonNull ? extends Publisher<R>> selector,
+ long time, @NonNull TimeUnit unit, @NonNull Scheduler scheduler, boolean eagerTruncate) {
Objects.requireNonNull(selector, "selector is null");
Objects.requireNonNull(unit, "unit is null");
Objects.requireNonNull(scheduler, "scheduler is null");
@@ -15993,7 +16010,9 @@ public final void subscribe(@NonNull FlowableSubscriber super T> subscriber) {
try {
Subscriber<? super T> flowableSubscriber = RxJavaPlugins.onSubscribe(this, subscriber);
- Objects.requireNonNull(flowableSubscriber, "The RxJavaPlugins.onSubscribe hook returned a null FlowableSubscriber. Please check the handler provided to RxJavaPlugins.setOnFlowableSubscribe for invalid null returns. Further reading: https://github.com/ReactiveX/RxJava/wiki/Plugins");
+ Objects.requireNonNull(flowableSubscriber, "The RxJavaPlugins.onSubscribe hook returned a null FlowableSubscriber. "
+ + "Please check the handler provided to RxJavaPlugins.setOnFlowableSubscribe for invalid null returns. "
+ + "Further reading: https://github.com/ReactiveX/RxJava/wiki/Plugins");
subscribeActual(flowableSubscriber);
} catch (NullPointerException e) { // NOPMD
@@ -17848,7 +17867,8 @@ public final Flowable> timeInterval(@NonNull TimeUnit unit, @NonNull Sc
@NonNull
@BackpressureSupport(BackpressureKind.FULL)
@SchedulerSupport(SchedulerSupport.NONE)
- public final <@NonNull V> Flowable<T> timeout(@NonNull Function<? super T, @NonNull ? extends Publisher<V>> itemTimeoutIndicator, @NonNull Publisher<? extends T> fallback) {
+ public final <@NonNull V> Flowable<T> timeout(@NonNull Function<? super T, @NonNull ? extends Publisher<V>> itemTimeoutIndicator,
+ @NonNull Publisher<? extends T> fallback) {
Objects.requireNonNull(fallback, "fallback is null");
return timeout0(null, itemTimeoutIndicator, fallback);
}
@@ -18405,7 +18425,8 @@ public final Single> toList(int capacityHint) {
@NonNull
@BackpressureSupport(BackpressureKind.UNBOUNDED_IN)
@SchedulerSupport(SchedulerSupport.NONE)
- public final <@NonNull K, @NonNull V> Single