如何解决从 Wildfly 20 升级到 23.0.1 后,infinispan 缓存中的超时消息记录和重试风暴
我们刚刚从 Wildfly 20 升级到了 23,现在发现 Infinispan 报错、不断写入日志并陷入重试循环的问题。该问题在启动后每秒发生约 100 次,并且仅在集群中的一个节点关闭后才会停止。
我们收到以下错误,该错误会无限期地持续,在服务器之间占用约 30Mb/s 的带宽,而正常情况下通常只有 10-30Kb/s。令人困惑的是:节点 1 收到的错误来自节点 2,而节点 2 报告的错误却是等待节点 1 超时。我尝试从 udp 协议栈切换到 tcp 协议栈,但仍然看到相同的问题(这是一个 2 节点集群)。
我将远程超时从默认的 10 秒增加到 30 秒,几乎立即看到了同样的错误。
wildfly 23 是否需要新的设置,或者我这边有什么其他的失误,或者我遇到了新的错误?
这里是 jgroups 配置:
<stack name="udp" statistics-enabled="true">
<transport type="UDP" shared="false" socket-binding="jgroups-udp" statistics-enabled="true">
<property name="log_discard_msgs">
false
</property>
<property name="port_range">
50
</property>
</transport>
<protocol type="PING" module="org.jgroups" statistics-enabled="true"/>
<protocol type="MERGE3" module="org.jgroups" statistics-enabled="true"/>
<socket-protocol type="FD_SOCK" module="org.jgroups" socket-binding="jgroups-udp-fd" statistics-enabled="true"/>
<protocol type="FD_ALL" module="org.jgroups" statistics-enabled="true"/>
<protocol type="VERIFY_SUSPECT" module="org.jgroups" statistics-enabled="true"/>
<protocol type="pbcast.NAKACK2" module="org.jgroups" statistics-enabled="true"/>
<protocol type="UNICAST3" module="org.jgroups" statistics-enabled="true"/>
<protocol type="pbcast.STABLE" module="org.jgroups" statistics-enabled="true"/>
<protocol type="pbcast.GMS" module="org.jgroups" statistics-enabled="true"/>
<protocol type="UFC" module="org.jgroups" statistics-enabled="true"/>
<protocol type="MFC" module="org.jgroups" statistics-enabled="true"/>
<protocol type="FRAG3"/>
</stack>
和 infinispan
<cache-container name="localsite-cachecontainer" default-cache="epi-localsite-default" statistics-enabled="true">
<transport lock-timeout="60000" channel="localsite-appCache"/>
<replicated-cache name="bServiceCache" statistics-enabled="true">
<locking isolation="NONE"/>
<transaction mode="NONE"/>
<expiration lifespan="1800000"/>
</replicated-cache>
</cache-container>
22:47:52,823 WARN [org.infinispan.CLUSTER] (thread-223,application-localsite,node1) ISPN000071: Caught exception when handling command SingleRpcCommand{cacheName='application-bServiceCache',command=PutkeyvalueCommand{key=SimpleKey [XXXX,2021-05-06,1412.0,75.0,null],value=[YYYY[pp=4 Pay,PaymentDue=2021-05-28],ppAvaliablity[firstPaymentDue=2021-05-28],ppAvaliablity[firstPaymentDue=2021-05-28]],flags=[],commandInvocationId=CommandInvocation:node2:537,putIfAbsent=true,valueMatcher=MATCH_ALWAYS,Metadata=EmbeddedExpirableMetadata{version=null,lifespan=1800000,maxIdle=-1},successful=true,topologyId=18}}: org.infinispan.remoting.remoteexception: ISPN000217: Received exception from node2,see cause for remote stack trace
at org.infinispan.remoting.transport.ResponseCollectors.wrapremoteexception(ResponseCollectors.java:25)
at org.infinispan.remoting.transport.impl.MapResponseCollector.addException(MapResponseCollector.java:64)
at org.infinispan.remoting.transport.impl.MapResponseCollector$IgnoreLeavers.addException(MapResponseCollector.java:102)
at org.infinispan.remoting.transport.ValidResponseCollector.addResponse(ValidResponseCollector.java:29)
at org.infinispan.remoting.transport.impl.MultiTargetRequest.onResponse(MultiTargetRequest.java:93)
at org.infinispan.remoting.transport.impl.RequestRepository.addResponse(RequestRepository.java:52)
at org.infinispan.remoting.transport.jgroups.jgroupsTransport.processResponse(jgroupsTransport.java:1402)
at org.infinispan.remoting.transport.jgroups.jgroupsTransport.processMessage(jgroupsTransport.java:1305)
at org.infinispan.remoting.transport.jgroups.jgroupsTransport.access$300(jgroupsTransport.java:131)
at org.infinispan.remoting.transport.jgroups.jgroupsTransport$ChannelCallbacks.up(jgroupsTransport.java:1445)
at org.jgroups.JChannel.up(JChannel.java:784)
at org.jgroups.fork.ForkProtocolStack.up(ForkProtocolStack.java:135)
at org.jgroups.stack.Protocol.up(Protocol.java:309)
at org.jgroups.protocols.FORK.up(FORK.java:142)
at org.jgroups.protocols.FRAG3.up(FRAG3.java:165)
at org.jgroups.protocols.FlowControl.up(FlowControl.java:343)
at org.jgroups.protocols.pbcast.GMS.up(GMS.java:876)
at org.jgroups.protocols.pbcast.STABLE.up(STABLE.java:243)
at org.jgroups.protocols.UNICAST3.deliverMessage(UNICAST3.java:1049)
at org.jgroups.protocols.UNICAST3.addMessage(UNICAST3.java:772)
at org.jgroups.protocols.UNICAST3.handleDataReceived(UNICAST3.java:753)
at org.jgroups.protocols.UNICAST3.up(UNICAST3.java:405)
at org.jgroups.protocols.pbcast.NAKACK2.up(NAKACK2.java:592)
at org.jgroups.protocols.VERIFY_SUSPECT.up(VERIFY_SUSPECT.java:132)
at org.jgroups.protocols.FD.up(FD.java:227)
at org.jgroups.protocols.FD_SOCK.up(FD_SOCK.java:254)
at org.jgroups.protocols.MERGE3.up(MERGE3.java:281)
at org.jgroups.protocols.discovery.up(discovery.java:300)
at org.jgroups.protocols.TP.passMessageUp(TP.java:1396)
at org.jgroups.util.SubmitToThreadPool$SingleMessageHandler.run(SubmitToThreadPool.java:87)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at org.jboss.as.clustering.context.ContextReferenceExecutor.execute(ContextReferenceExecutor.java:49)
at org.jboss.as.clustering.context.ContextualExecutor$1.run(ContextualExecutor.java:70)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.infinispan.util.concurrent.TimeoutException: ISPN000476: Timed out waiting for responses for request 4485 from node1
at sun.reflect.GeneratedConstructorAccessor551.newInstance(Unknown Source)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.infinispan.marshall.exts.ThrowableExternalizer.readGenericThrowable(ThrowableExternalizer.java:282)
at org.infinispan.marshall.exts.ThrowableExternalizer.readobject(ThrowableExternalizer.java:259)
at org.infinispan.marshall.exts.ThrowableExternalizer.readobject(ThrowableExternalizer.java:42)
at org.infinispan.marshall.core.GlobalMarshaller.readWithExternalizer(GlobalMarshaller.java:728)
at org.infinispan.marshall.core.GlobalMarshaller.readNonNullableObject(GlobalMarshaller.java:709)
at org.infinispan.marshall.core.GlobalMarshaller.readNullableObject(GlobalMarshaller.java:358)
at org.infinispan.marshall.core.BytesObjectInput.readobject(BytesObjectInput.java:32)
at org.infinispan.remoting.responses.ExceptionResponse$Externalizer.readobject(ExceptionResponse.java:49)
at org.infinispan.remoting.responses.ExceptionResponse$Externalizer.readobject(ExceptionResponse.java:41)
at org.infinispan.marshall.core.GlobalMarshaller.readWithExternalizer(GlobalMarshaller.java:728)
at org.infinispan.marshall.core.GlobalMarshaller.readNonNullableObject(GlobalMarshaller.java:709)
at org.infinispan.marshall.core.GlobalMarshaller.readNullableObject(GlobalMarshaller.java:358)
at org.infinispan.marshall.core.GlobalMarshaller.objectFromObjectInput(GlobalMarshaller.java:192)
at org.infinispan.marshall.core.GlobalMarshaller.objectFromByteBuffer(GlobalMarshaller.java:221)
at org.infinispan.remoting.transport.jgroups.jgroupsTransport.processResponse(jgroupsTransport.java:1394)
... 28 more
解决方法
您能否也附上 "localsite-appCache" 通道(channel)的配置?另外,能否附上一段代码片段,演示应用程序中是如何引用该缓存的?
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。