[ https://issues.apache.org/jira/browse/NIFI-7808?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17347111#comment-17347111 ]

Bill Sandman commented on NIFI-7808:
------------------------------------

Is this the same as NIFI-7830? It seems so, and this issue can possibly be closed as a duplicate.
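
For context while triaging: the 413 in the quoted trace is raised from a single DataLakeFileClient.append() call that sends the whole FlowFile body in one request. Below is a minimal sketch of the chunked-upload approach that avoids the per-request limit, assuming the azure-storage-file-datalake client seen in the trace; the 4 MB chunk size and the ChunkedDataLakeUpload helper are illustrative, not the actual fix.

{code:java}
import com.azure.storage.file.datalake.DataLakeFileClient;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ChunkedDataLakeUpload {

    // Illustrative chunk size, assumed to sit safely below the service's
    // per-request body limit that produces the 413 above.
    private static final int CHUNK_SIZE = 4 * 1024 * 1024;

    // Appends the content in bounded chunks instead of one oversized append,
    // then commits everything with a single flush. Assumes the file has
    // already been created, e.g. via fileClient.create().
    static void upload(DataLakeFileClient fileClient, InputStream in) throws IOException {
        byte[] buffer = new byte[CHUNK_SIZE];
        long position = 0;
        int read;
        while ((read = in.read(buffer)) != -1) {
            // Each request body is at most CHUNK_SIZE bytes.
            fileClient.append(new ByteArrayInputStream(buffer, 0, read), position, read);
            position += read;
        }
        // flush commits the appended ranges at the final file length.
        fileClient.flush(position);
    }
}
{code}

For the blob-side PutAzureBlobStorage case in the title, the equivalent is staging blocks and committing a block list rather than issuing a single Put Blob request.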

> PutAzureBlobStorage fails large files with The request body is too large and 
> exceeds the maximum permissible limit
> ------------------------------------------------------------------------------------------------------------------
>
>                 Key: NIFI-7808
>                 URL: https://issues.apache.org/jira/browse/NIFI-7808
>             Project: Apache NiFi
>          Issue Type: Bug
>    Affects Versions: 1.12.0
>         Environment: NiFi for Mac OS using HomeBrew
>            Reporter: Jeff Dix
>            Priority: Major
>
> PutAzureBlobStorage fails consistently with FlowFiles larger than 50 MB, 
> with the message _The request body is too large and exceeds the maximum 
> permissible limit_. When the error occurs, in some cases the file has been 
> completely uploaded to ADLS Gen 2, but in others the blob is 0 bytes. In a 
> couple of instances the error did not occur and the large FlowFile was 
> processed successfully, but this is rare. It appears to be related to the 
> Azure SDK, so this ticket might be helpful: 
> [https://github.com/Azure/azure-storage-blob-go/issues/141]
> {code:java}
> 2020-09-14 10:25:33,221 ERROR [Timer-Driven Process Thread-7] o.a.n.p.a.s.PutAzureDataLakeStorage PutAzureDataLakeStorage[id=7ddcff2f-0174-1000-874e-8e77a63b2d08] Failed to create file on Azure Data Lake Storage: com.azure.storage.file.datalake.models.DataLakeStorageException: Status code 413, "{"error":{"code":"RequestBodyTooLarge","message":"The request body is too large and exceeds the maximum permissible limit.\nRequestId:0aec5867-f01f-0027-3fab-8a48b3000000\nTime:2020-09-14T15:25:06.3113287Z"}}"
> com.azure.storage.file.datalake.models.DataLakeStorageException: Status code 413, "{"error":{"code":"RequestBodyTooLarge","message":"The request body is too large and exceeds the maximum permissible limit.\nRequestId:0aec5867-f01f-0027-3fab-8a48b3000000\nTime:2020-09-14T15:25:06.3113287Z"}}"
>         at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>         at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>         at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>         at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
>         at com.azure.core.http.rest.RestProxy.instantiateUnexpectedException(RestProxy.java:320)
>         at com.azure.core.http.rest.RestProxy.lambda$ensureExpectedStatus$3(RestProxy.java:361)
>         at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:118)
>         at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1755)
>         at reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.signalCached(MonoCacheTime.java:320)
>         at reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.onNext(MonoCacheTime.java:337)
>         at reactor.core.publisher.Operators$ScalarSubscription.request(Operators.java:2317)
>         at reactor.core.publisher.MonoCacheTime$CoordinatorSubscriber.onSubscribe(MonoCacheTime.java:276)
>         at reactor.core.publisher.FluxFlatMap.trySubscribeScalarMap(FluxFlatMap.java:191)
>         at reactor.core.publisher.MonoFlatMap.subscribeOrReturn(MonoFlatMap.java:53)
>         at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:57)
>         at reactor.core.publisher.MonoDefer.subscribe(MonoDefer.java:52)
>         at reactor.core.publisher.MonoCacheTime.subscribeOrReturn(MonoCacheTime.java:132)
>         at reactor.core.publisher.InternalMonoOperator.subscribe(InternalMonoOperator.java:57)
>         at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:150)
>         at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:114)
>         at reactor.core.publisher.FluxDoFinally$DoFinallySubscriber.onNext(FluxDoFinally.java:123)
>         at reactor.core.publisher.FluxHandle$HandleSubscriber.onNext(FluxHandle.java:112)
>         at reactor.core.publisher.FluxMap$MapConditionalSubscriber.onNext(FluxMap.java:213)
>         at reactor.core.publisher.FluxDoFinally$DoFinallySubscriber.onNext(FluxDoFinally.java:123)
>         at reactor.core.publisher.FluxHandleFuseable$HandleFuseableSubscriber.onNext(FluxHandleFuseable.java:178)
>         at reactor.core.publisher.FluxContextStart$ContextStartSubscriber.onNext(FluxContextStart.java:96)
>         at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1755)
>         at reactor.core.publisher.MonoCollectList$MonoCollectListSubscriber.onComplete(MonoCollectList.java:121)
>         at reactor.core.publisher.FluxPeek$PeekSubscriber.onComplete(FluxPeek.java:252)
>         at reactor.core.publisher.FluxMap$MapSubscriber.onComplete(FluxMap.java:136)
>         at reactor.netty.channel.FluxReceive.onInboundComplete(FluxReceive.java:366)
>         at reactor.netty.channel.ChannelOperations.onInboundComplete(ChannelOperations.java:367)
>         at reactor.netty.channel.ChannelOperations.terminate(ChannelOperations.java:423)
>         at reactor.netty.http.client.HttpClientOperations.onInboundNext(HttpClientOperations.java:607)
>         at reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:96)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
>         at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelRead(CombinedChannelDuplexHandler.java:436)
>         at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:324)
>         at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:296)
>         at io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:251)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
>         at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1518)
>         at io.netty.handler.ssl.SslHandler.decodeNonJdkCompatible(SslHandler.java:1279)
>         at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1316)
>         at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501)
>         at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:440)
>         at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
>         at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
>         at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919)
>         at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163)
>         at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:714)
>         at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:650)
>         at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:576)
>         at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493)
>         at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989)
>         at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
>         at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
>         at java.lang.Thread.run(Thread.java:748)
>         Suppressed: java.lang.Exception: #block terminated with an error
>                 at reactor.core.publisher.BlockingSingleSubscriber.blockingGet(BlockingSingleSubscriber.java:99)
>                 at reactor.core.publisher.Mono.block(Mono.java:1678)
>                 at com.azure.storage.common.implementation.StorageImplUtils.blockWithOptionalTimeout(StorageImplUtils.java:99)
>                 at com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse(DataLakeFileClient.java:269)
>                 at com.azure.storage.file.datalake.DataLakeFileClient.append(DataLakeFileClient.java:233)
>                 at org.apache.nifi.processors.azure.storage.PutAzureDataLakeStorage.onTrigger(PutAzureDataLakeStorage.java:124)
>                 at org.apache.nifi.processor.AbstractProcessor.onTrigger(AbstractProcessor.java:27)
>                 at org.apache.nifi.controller.StandardProcessorNode.onTrigger(StandardProcessorNode.java:1174)
>                 at org.apache.nifi.controller.tasks.ConnectableTask.invoke(ConnectableTask.java:213)
>                 at org.apache.nifi.controller.scheduling.TimerDrivenSchedulingAgent$1.run(TimerDrivenSchedulingAgent.java:117)
>                 at org.apache.nifi.engine.FlowEngine$2.run(FlowEngine.java:110)
>                 at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
>                 at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
>                 at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
>                 at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
>                 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>                 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>                 ... 1 common frames omitted
> {code}
>  



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
