I'm trying the PyFlink–Kafka connection. I'm using Python 3.11 on PyCharm, with the flink-sql-connector-kafka-3.3.0-1.20.jar and apache-flink 1.20. I'm running Kafka on Docker, and I've tested the producer, which can correctly send messages to the topic. I'm trying with a simple code like:
from pathlib import Path

from pyflink.common import Configuration, WatermarkStrategy
from pyflink.common.serialization import SimpleStringSchema
from pyflink.datastream import (
    DataStream,
    RuntimeExecutionMode,
    StreamExecutionEnvironment,
)
from pyflink.datastream.connectors.kafka import KafkaOffsetsInitializer, KafkaSource

# Build the configuration before creating the environment: parent-first
# classloading avoids connector classloader clashes, and a fixed REST port
# makes the local web UI predictable across runs.
config = Configuration()
config.set_string("classloader.resolve-order", "parent-first")
config.set_string("rest.port", "46167")
# NOTE(review): "Process died with exit code 0" with parallelism > 1 is a
# known symptom of the Beam loopback/process Python worker dying; consider
# config.set_string("python.execution-mode", "thread") as a workaround —
# confirm against the PyFlink version in use.
env = StreamExecutionEnvironment.get_execution_environment(config)
env.set_runtime_mode(RuntimeExecutionMode.STREAMING)
env.set_parallelism(3)  # matches the 3 partitions of the 'videoframes' topic
env.configure(config)

# Register every connector jar found in the local jars directory.
# Path.as_uri() yields a well-formed file:///C:/... URL on Windows, instead
# of hand-concatenating "file:///" around a backslash path.
jars_path = Path(r"C:\Users\matti\Documents\GitHub\iqa\jars")
env.add_jars(*(jar.as_uri() for jar in jars_path.glob("*.jar")))

# Kafka source: string values only, consumed from the earliest offset.
kafka_source: KafkaSource = (
    KafkaSource.builder()
    .set_bootstrap_servers('localhost:9092')
    .set_group_id('frameanalyzergroup')
    .set_starting_offsets(KafkaOffsetsInitializer.earliest())
    .set_value_only_deserializer(SimpleStringSchema())
    .set_topics('videoframes')
    .build()
)

visits_input_data_stream: DataStream = env.from_source(
    source=kafka_source,
    watermark_strategy=WatermarkStrategy.no_watermarks(),
    source_name="videoframes",
)

# test_function must be defined (or imported) elsewhere in this module.
transformed_stream = visits_input_data_stream.map(test_function)

env.execute("Test")
The code works for a while — I can read some messages — then I receive the following error:
py4j.protocol.Py4JJavaError: An error occurred while calling o0.execute.
org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
at org.apache.flink.runtime.minicluster.MiniClusterJobClient.lambda$getJobExecutionResult$3(MiniClusterJobClient.java:141)
at java.base/java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:646)
at java.base/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510)
at java.base/java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:2179)
at org.apache.flink.runtime.rpc.pekko.PekkoInvocationHandler.lambda$invokeRpc$1(PekkoInvocationHandler.java:268)
at java.base/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863)
at java.base/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841)
at java.base/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510)
at java.base/java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:2179)
at org.apache.flink.util.concurrent.FutureUtils.doForward(FutureUtils.java:1287)
at org.apache.flink.runtime.concurrent.ClassLoadingUtils.lambda$null$1(ClassLoadingUtils.java:93)
at org.apache.flink.runtime.concurrent.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
at org.apache.flink.runtime.concurrent.ClassLoadingUtils.lambda$guardCompletionWithContextClassLoader$2(ClassLoadingUtils.java:92)
at java.base/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863)
at java.base/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841)
at java.base/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510)
at java.base/java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:2179)
at org.apache.flink.runtime.concurrent.pekko.ScalaFutureUtils$1.onComplete(ScalaFutureUtils.java:47)
at org.apache.pekko.dispatch.OnComplete.internal(Future.scala:310)
at org.apache.pekko.dispatch.OnComplete.internal(Future.scala:307)
at org.apache.pekko.dispatch.japi$CallbackBridge.apply(Future.scala:234)
at org.apache.pekko.dispatch.japi$CallbackBridge.apply(Future.scala:231)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
at org.apache.flink.runtime.concurrent.pekko.ScalaFutureUtils$DirectExecutionContext.execute(ScalaFutureUtils.java:65)
at scala.concurrent.impl.CallbackRunnable.executeWithValue(Promise.scala:72)
at scala.concurrent.impl.Promise$DefaultPromise.$anonfun$tryComplete$1(Promise.scala:288)
at scala.concurrent.impl.Promise$DefaultPromise.$anonfun$tryComplete$1$adapted(Promise.scala:288)
at scala.concurrent.impl.Promise$DefaultPromise.tryComplete(Promise.scala:288)
at org.apache.pekko.pattern.PromiseActorRef.$bang(AskSupport.scala:629)
at org.apache.pekko.pattern.PipeToSupport$PipeableFuture$$anonfun$pipeTo$1.applyOrElse(PipeToSupport.scala:34)
at org.apache.pekko.pattern.PipeToSupport$PipeableFuture$$anonfun$pipeTo$1.applyOrElse(PipeToSupport.scala:33)
at scala.concurrent.Future.$anonfun$andThen$1(Future.scala:536)
at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
at org.apache.pekko.dispatch.BatchingExecutor$AbstractBatch.processBatch(BatchingExecutor.scala:73)
at org.apache.pekko.dispatch.BatchingExecutor$BlockableBatch.$anonfun$run$1(BatchingExecutor.scala:110)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at scala.concurrent.BlockContext$.withBlockContext(BlockContext.scala:85)
at org.apache.pekko.dispatch.BatchingExecutor$BlockableBatch.run(BatchingExecutor.scala:110)
at org.apache.pekko.dispatch.TaskInvocation.run(AbstractDispatcher.scala:59)
at org.apache.pekko.dispatch.ForkJoinExecutorConfigurator$PekkoForkJoinTask.exec(ForkJoinExecutorConfigurator.scala:57)
at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:507)
at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1489)
at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:2071)
at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:2033)
at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:187)
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
at org.apache.flink.runtime.executiongraph.failover.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:219)
at org.apache.flink.runtime.executiongraph.failover.ExecutionFailureHandler.handleFailureAndReport(ExecutionFailureHandler.java:166)
at org.apache.flink.runtime.executiongraph.failover.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:121)
at org.apache.flink.runtime.scheduler.DefaultScheduler.recordTaskFailure(DefaultScheduler.java:281)
at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:272)
at org.apache.flink.runtime.scheduler.DefaultScheduler.onTaskFailed(DefaultScheduler.java:265)
at org.apache.flink.runtime.scheduler.SchedulerBase.onTaskExecutionStateUpdate(SchedulerBase.java:787)
at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:764)
at org.apache.flink.runtime.scheduler.SchedulerNG.updateTaskExecutionState(SchedulerNG.java:83)
at org.apache.flink.runtime.jobmaster.JobMaster.updateTaskExecutionState(JobMaster.java:515)
at java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:103)
at java.base/java.lang.reflect.Method.invoke(Method.java:580)
at org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.lambda$handleRpcInvocation$1(PekkoRpcActor.java:318)
at org.apache.flink.runtime.concurrent.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:83)
at org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.handleRpcInvocation(PekkoRpcActor.java:316)
at org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.handleRpcMessage(PekkoRpcActor.java:229)
at org.apache.flink.runtime.rpc.pekko.FencedPekkoRpcActor.handleRpcMessage(FencedPekkoRpcActor.java:88)
at org.apache.flink.runtime.rpc.pekko.PekkoRpcActor.handleMessage(PekkoRpcActor.java:174)
at org.apache.pekko.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:33)
at org.apache.pekko.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:29)
at scala.PartialFunction.applyOrElse(PartialFunction.scala:127)
at scala.PartialFunction.applyOrElse$(PartialFunction.scala:126)
at org.apache.pekko.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:29)
at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:175)
at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:176)
at org.apache.pekko.actor.Actor.aroundReceive(Actor.scala:547)
at org.apache.pekko.actor.Actor.aroundReceive$(Actor.scala:545)
at org.apache.pekko.actor.AbstractActor.aroundReceive(AbstractActor.scala:229)
at org.apache.pekko.actor.ActorCell.receiveMessage(ActorCell.scala:590)
at org.apache.pekko.actor.ActorCell.invoke(ActorCell.scala:557)
at org.apache.pekko.dispatch.Mailbox.processMailbox(Mailbox.scala:280)
at org.apache.pekko.dispatch.Mailbox.run(Mailbox.scala:241)
at org.apache.pekko.dispatch.Mailbox.exec(Mailbox.scala:253)
... 5 more
Caused by: java.lang.RuntimeException: Failed to create stage bundle factory! INFO:root:Initializing Python harness: C:\Users\matti\Documents\GitHub\iqa\.venv\Lib\site-packages\pyflink\fn_execution\beam\beam_boot.py --id=15-1 --provision_endpoint=localhost:62519
INFO:root:Starting up Python harness in loopback mode.
at org.apache.flink.streaming.api.runners.python.beam.BeamPythonFunctionRunner.createStageBundleFactory(BeamPythonFunctionRunner.java:656)
at org.apache.flink.streaming.api.runners.python.beam.BeamPythonFunctionRunner.open(BeamPythonFunctionRunner.java:281)
at org.apache.flink.streaming.api.operators.python.process.AbstractExternalPythonFunctionOperator.open(AbstractExternalPythonFunctionOperator.java:57)
at org.apache.flink.streaming.api.operators.python.process.AbstractExternalDataStreamPythonFunctionOperator.open(AbstractExternalDataStreamPythonFunctionOperator.java:85)
at org.apache.flink.streaming.api.operators.python.process.AbstractExternalOneInputPythonFunctionOperator.open(AbstractExternalOneInputPythonFunctionOperator.java:117)
at org.apache.flink.streaming.api.operators.python.process.ExternalPythonProcessOperator.open(ExternalPythonProcessOperator.java:64)
at org.apache.flink.streaming.runtime.tasks.RegularOperatorChain.initializeStateAndOpenOperators(RegularOperatorChain.java:107)
at org.apache.flink.streaming.runtime.tasks.StreamTask.restoreStateAndGates(StreamTask.java:858)
at org.apache.flink.streaming.runtime.tasks.StreamTask.lambda$restoreInternal$5(StreamTask.java:812)
at org.apache.flink.streaming.runtime.tasks.StreamTaskActionExecutor$SynchronizedStreamTaskActionExecutor.call(StreamTaskActionExecutor.java:100)
at org.apache.flink.streaming.runtime.tasks.StreamTask.restoreInternal(StreamTask.java:812)
at org.apache.flink.streaming.runtime.tasks.StreamTask.restore(StreamTask.java:771)
at org.apache.flink.runtime.taskmanager.Task.runWithSystemExitMonitoring(Task.java:970)
at org.apache.flink.runtime.taskmanager.Task.restoreAndInvoke(Task.java:939)
at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:763)
at org.apache.flink.runtime.taskmanager.Task.run(Task.java:575)
at java.base/java.lang.Thread.run(Thread.java:1570)
Caused by: org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.UncheckedExecutionException: java.lang.IllegalStateException: Process died with exit code 0
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$Segment.get(LocalCache.java:2050)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache.get(LocalCache.java:3952)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache.getOrLoad(LocalCache.java:3974)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$LocalLoadingCache.get(LocalCache.java:4958)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$LocalLoadingCache.getUnchecked(LocalCache.java:4964)
at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$SimpleStageBundleFactory.<init>(DefaultJobBundleFactory.java:498)
at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$SimpleStageBundleFactory.<init>(DefaultJobBundleFactory.java:482)
at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory.forStage(DefaultJobBundleFactory.java:342)
at org.apache.flink.streaming.api.runners.python.beam.BeamPythonFunctionRunner.createStageBundleFactory(BeamPythonFunctionRunner.java:654)
... 16 more
Caused by: java.lang.IllegalStateException: Process died with exit code 0
at org.apache.beam.runners.fnexecution.environment.ProcessManager$RunningProcess.isAliveOrThrow(ProcessManager.java:75)
at org.apache.beam.runners.fnexecution.environment.ProcessEnvironmentFactory.createEnvironment(ProcessEnvironmentFactory.java:110)
at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$1.load(DefaultJobBundleFactory.java:284)
at org.apache.beam.runners.fnexecution.control.DefaultJobBundleFactory$1.load(DefaultJobBundleFactory.java:240)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$LoadingValueReference.loadFuture(LocalCache.java:3528)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$Segment.loadSync(LocalCache.java:2277)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$Segment.lockedGetOrLoad(LocalCache.java:2154)
at org.apache.beam.vendor.guava.v26_0_jre.com.google.common.cache.LocalCache$Segment.get(LocalCache.java:2044)
... 24 more
Does anyone have the same issue? This error happens only if I set env.set_parallelism to more than 1. The topic's partitions are set to 3.
I was expecting the job not to crash with this error. I tried different apache-flink versions with different Python versions, but nothing changed.