微信公众号搜"智元新知"关注
微信扫一扫可直接关注哦!

为什么 http://localhost:9021 无法打开Confluent控制中心?

如何解决 http://localhost:9021 无法打开Confluent控制中心的问题?

我正在Docker for Desktop上进行Confluent Admin培训并运行实验。请参阅附件的docker-compose yaml文件。Confluent控制中心无法在浏览器中打开,我使用的地址是 http://localhost:9021 。之前可以正常打开,但现在不行了。我在计算机上所做的唯一更改是安装了McAfee Live Safe。我甚至尝试关闭防火墙,但也无济于事。

如果您有过类似的经历以及解决此问题的方法,能否分享一下?

docker-compose.yaml文件

    version: "3.5"
    services:
      zk-1:
        image: confluentinc/cp-zookeeper:5.3.1
        hostname: zk-1
        container_name: zk-1
        ports:
          - "12181:2181"
        volumes:
          # Persist the ZooKeeper transaction log and snapshot data.
          - data-zk-log-1:/var/lib/zookeeper/log
          - data-zk-data-1:/var/lib/zookeeper/data
        networks:
          - confluent
        # Map form of `environment` — equivalent to the original list form;
        # values quoted so YAML keeps them as strings.
        environment:
          ZOOKEEPER_SERVER_ID: "1"
          ZOOKEEPER_CLIENT_PORT: "2181"
          ZOOKEEPER_TICK_TIME: "2000"
          ZOOKEEPER_INIT_LIMIT: "5"
          ZOOKEEPER_SYNC_LIMIT: "2"
          # Ensemble definition: peer and leader-election ports for all three nodes.
          ZOOKEEPER_SERVERS: "zk-1:2888:3888;zk-2:2888:3888;zk-3:2888:3888"
      
      zk-2:
        image: confluentinc/cp-zookeeper:5.3.1
        hostname: zk-2
        container_name: zk-2
        ports:
          - "22181:2181"
        volumes:
          # Persist the ZooKeeper transaction log and snapshot data.
          - data-zk-log-2:/var/lib/zookeeper/log
          - data-zk-data-2:/var/lib/zookeeper/data
        networks:
          - confluent
        # Map form of `environment` — equivalent to the original list form;
        # values quoted so YAML keeps them as strings.
        environment:
          ZOOKEEPER_SERVER_ID: "2"
          ZOOKEEPER_CLIENT_PORT: "2181"
          ZOOKEEPER_TICK_TIME: "2000"
          ZOOKEEPER_INIT_LIMIT: "5"
          ZOOKEEPER_SYNC_LIMIT: "2"
          # Ensemble definition: peer and leader-election ports for all three nodes.
          ZOOKEEPER_SERVERS: "zk-1:2888:3888;zk-2:2888:3888;zk-3:2888:3888"
      
      zk-3:
        image: confluentinc/cp-zookeeper:5.3.1
        hostname: zk-3
        container_name: zk-3
        ports:
          - "32181:2181"
        volumes:
          # Persist the ZooKeeper transaction log and snapshot data.
          - data-zk-log-3:/var/lib/zookeeper/log
          - data-zk-data-3:/var/lib/zookeeper/data
        networks:
          - confluent
        # Map form of `environment` — equivalent to the original list form;
        # values quoted so YAML keeps them as strings.
        environment:
          ZOOKEEPER_SERVER_ID: "3"
          ZOOKEEPER_CLIENT_PORT: "2181"
          ZOOKEEPER_TICK_TIME: "2000"
          ZOOKEEPER_INIT_LIMIT: "5"
          ZOOKEEPER_SYNC_LIMIT: "2"
          # Ensemble definition: peer and leader-election ports for all three nodes.
          ZOOKEEPER_SERVERS: "zk-1:2888:3888;zk-2:2888:3888;zk-3:2888:3888"
    
      kafka-1:
        image: confluentinc/cp-enterprise-kafka:5.3.1
        hostname: kafka-1
        container_name: kafka-1
        ports:
          - "19092:9092"
        networks:
          - confluent
        volumes:
          - data-kafka-1:/var/lib/kafka/data
        environment:
          # Fixed: env var names normalized to the documented upper-case
          # form (KAFKA_broKER_ID -> KAFKA_BROKER_ID, etc.) — the image
          # derives broker properties from these exact names.
          KAFKA_BROKER_ID: 101
          KAFKA_ZOOKEEPER_CONNECT: zk-1:2181,zk-2:2181,zk-3:2181
          # DOCKER listener serves clients inside the compose network;
          # HOST is advertised as localhost for clients on the Docker host.
          KAFKA_LISTENERS: DOCKER://kafka-1:9092,HOST://kafka-1:19092
          KAFKA_ADVERTISED_LISTENERS: DOCKER://kafka-1:9092,HOST://localhost:19092
          KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: DOCKER:PLAINTEXT,HOST:PLAINTEXT
          KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER
          # Metrics reporter feeds Confluent Control Center.
          KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
          CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka-1:9092,kafka-2:9092,kafka-3:9092"
    
      kafka-2:
        image: confluentinc/cp-enterprise-kafka:5.3.1
        hostname: kafka-2
        container_name: kafka-2
        ports:
          - "29092:9092"
        networks:
          - confluent
        volumes:
          - data-kafka-2:/var/lib/kafka/data
        environment:
          # Fixed: this stanza had been truncated — the security-protocol
          # map, inter-broker listener name and metrics-reporter settings
          # were collapsed into one broken line, and zk-2 was missing from
          # the ZooKeeper connect string. Restored to mirror kafka-1.
          KAFKA_BROKER_ID: 102
          KAFKA_ZOOKEEPER_CONNECT: zk-1:2181,zk-2:2181,zk-3:2181
          KAFKA_LISTENERS: DOCKER://kafka-2:9092,HOST://kafka-2:29092
          KAFKA_ADVERTISED_LISTENERS: DOCKER://kafka-2:9092,HOST://localhost:29092
          KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: DOCKER:PLAINTEXT,HOST:PLAINTEXT
          KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER
          KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
          CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka-1:9092,kafka-2:9092,kafka-3:9092"
    
      kafka-3:
        image: confluentinc/cp-enterprise-kafka:5.3.1
        hostname: kafka-3
        container_name: kafka-3
        ports:
          - "39092:9092"
        networks:
          - confluent
        volumes:
          - data-kafka-3:/var/lib/kafka/data
        environment:
          # Fixed: this stanza had been truncated — the security-protocol
          # map, inter-broker listener name and metrics-reporter settings
          # were collapsed into one broken line, and zk-2 was missing from
          # the ZooKeeper connect string. Restored to mirror kafka-1.
          KAFKA_BROKER_ID: 103
          KAFKA_ZOOKEEPER_CONNECT: zk-1:2181,zk-2:2181,zk-3:2181
          KAFKA_LISTENERS: DOCKER://kafka-3:9092,HOST://kafka-3:39092
          KAFKA_ADVERTISED_LISTENERS: DOCKER://kafka-3:9092,HOST://localhost:39092
          KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: DOCKER:PLAINTEXT,HOST:PLAINTEXT
          KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER
          KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
          CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka-1:9092,kafka-2:9092,kafka-3:9092"
    
      schema-registry:
        image: confluentinc/cp-schema-registry:5.3.1
        hostname: schema-registry
        container_name: schema-registry
        ports:
          - "8081:8081"
        networks:
          - confluent
        environment:
          SCHEMA_REGISTRY_HOST_NAME: schema-registry
          # Restored kafka-2 to the bootstrap list (dropped by the same
          # text mangling seen in the kafka-2/kafka-3 stanzas).
          SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: "kafka-1:9092,kafka-2:9092,kafka-3:9092"
          SCHEMA_REGISTRY_LISTENERS: "http://schema-registry:8081,http://localhost:8081"
          # Uses incorrect container utility belt (CUB) environment variables due to bug.
          # See https://github.com/confluentinc/cp-docker-images/issues/807. A fix was merged that
          # will be available in the CP 5.4 image.
          KAFKA_REST_CUB_KAFKA_TIMEOUT: 120
          # Fixed casing: MIN_broKERS -> MIN_BROKERS.
          KAFKA_REST_CUB_KAFKA_MIN_BROKERS: 3
    
      connect:
        image: confluentinc/cp-kafka-connect:5.3.1
        hostname: connect
        container_name: connect
        ports:
          - "8083:8083"
        volumes:
          # Host ./data directory exposed to connectors at /data.
          - ./data:/data
        networks:
          - confluent
        environment:
          # Interceptors report client metrics to Control Center.
          CONNECT_PRODUCER_INTERCEPTOR_CLASSES: io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor
          CONNECT_CONSUMER_INTERCEPTOR_CLASSES: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
          # Restored kafka-2 to the bootstrap list (dropped by the same
          # text mangling seen in the kafka-2/kafka-3 stanzas).
          CONNECT_BOOTSTRAP_SERVERS: kafka-1:9092,kafka-2:9092,kafka-3:9092
          CONNECT_GROUP_ID: "connect"
          # Fixed casing: CONNECT_CONfig_... -> CONNECT_CONFIG_... — the
          # image only maps the exact documented variable names.
          CONNECT_CONFIG_STORAGE_TOPIC: "connect-config"
          CONNECT_OFFSET_STORAGE_TOPIC: "connect-offsets"
          CONNECT_STATUS_STORAGE_TOPIC: "connect-status"
          CONNECT_KEY_CONVERTER: "io.confluent.connect.avro.AvroConverter"
          CONNECT_VALUE_CONVERTER: "io.confluent.connect.avro.AvroConverter"
          CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
          CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
          CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
          CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
          CONNECT_REST_ADVERTISED_HOST_NAME: "connect"
          CONNECT_LOG4J_ROOT_LOGLEVEL: INFO
          CONNECT_LOG4J_LOGGERS: org.reflections=ERROR
          CONNECT_PLUGIN_PATH: /usr/share/java
          CONNECT_REST_HOST_NAME: "connect"
          CONNECT_REST_PORT: 8083
          CONNECT_CUB_KAFKA_TIMEOUT: 120
    
      ksql-server:
        image: confluentinc/cp-ksql-server:5.3.1
        hostname: ksql-server
        container_name: ksql-server
        ports:
          - "8088:8088"
        networks:
          - confluent
        environment:
          # Fixed casing: Ksql_ -> KSQL_ — the cp-ksql-server image only
          # reads variables with the exact KSQL_ prefix, so the mangled
          # names were silently ignored.
          KSQL_CONFIG_DIR: "/etc/ksql"
          KSQL_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/ksql/log4j-rolling.properties"
          # Restored kafka-2 to the bootstrap list (dropped by the same
          # text mangling seen in the kafka-2/kafka-3 stanzas).
          KSQL_BOOTSTRAP_SERVERS: kafka-1:9092,kafka-2:9092,kafka-3:9092
          KSQL_HOST_NAME: ksql-server
          KSQL_APPLICATION_ID: "etl-demo"
          KSQL_LISTENERS: "http://0.0.0.0:8088"
          # Set the buffer cache to 0 so that the KSQL CLI shows all updates to KTables for learning purposes.
          # The default is 10 MB, which means records in a KTable are compacted before showing output.
          # Change cache.max.bytes.buffering and commit.interval.ms to tune this behavior.
          KSQL_CACHE_MAX_BYTES_BUFFERING: 0
          KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
          KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
          KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
    
      control-center:
        image: confluentinc/cp-enterprise-control-center:5.3.1
        hostname: control-center
        container_name: control-center
        restart: always
        networks:
          - confluent
        ports:
          # Web UI served on http://localhost:9021 on the Docker host.
          - "9021:9021"
        environment:
          # Fixed casing: CONTROL_CENTER_Ksql_* -> CONTROL_CENTER_KSQL_*;
          # restored kafka-2/zk-2 to the connection lists (dropped by the
          # same text mangling seen in the kafka-2/kafka-3 stanzas).
          CONTROL_CENTER_BOOTSTRAP_SERVERS: kafka-1:9092,kafka-2:9092,kafka-3:9092
          CONTROL_CENTER_ZOOKEEPER_CONNECT: zk-1:2181,zk-2:2181,zk-3:2181
          CONTROL_CENTER_STREAMS_NUM_STREAM_THREADS: 4
          CONTROL_CENTER_REPLICATION_FACTOR: 3
          CONTROL_CENTER_CONNECT_CLUSTER: "connect:8083"
          # Internal URL for Control Center to reach KSQL, and the URL the
          # browser should use from the host.
          CONTROL_CENTER_KSQL_URL: "http://ksql-server:8088"
          CONTROL_CENTER_KSQL_ADVERTISED_URL: "http://localhost:8088"
          CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
    
      tools:
        # Training utility container; kept alive via tty so you can
        # `docker exec` into it for CLI work against the cluster.
        image: cnfltraining/training-tools:5.3
        hostname: tools
        container_name: tools
        volumes:
          # Mount the compose project directory at /apps inside the container.
          - ${PWD}/:/apps
        working_dir: /apps
        networks:
          - confluent
        command: /bin/bash
        tty: true
    
    volumes:
      # Named volumes so ZooKeeper and Kafka data survive container
      # re-creation; `docker-compose down -v` deletes them (resets state).
      data-zk-log-1:
      data-zk-data-1:
      data-zk-log-2:
      data-zk-data-2:
      data-zk-log-3:
      data-zk-data-3:
      data-kafka-1:
      data-kafka-2:
      data-kafka-3:
    
    networks:
      # Single user-defined bridge network shared by all services.
      confluent:

所有Docker容器均已启动并正在运行;所有相关的Confluent服务都已启动。

谢谢!!

解决方法

最后...我从Confluent支持人员那里得到了答案。

实验室中的控制中心版本将在30天后过期。

这可以通过删除PC上的所有容器和卷来重置。

  1. docker-compose down -v 将停止并删除所有容器和卷。
  2. 重新运行docker-compose up -d命令。

现在请等待一两分钟,然后再在浏览器中打开控制中心。附言:应为Docker分配至少6GB内存以运行所有容器。

谢谢。

版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。