Transition from manual Docker host network to Docker Compose bridge


I have two Docker images, a Modbus server and a client, which I run manually with
docker run --network host server
and likewise for the client, and it works fine. But now I need to add them to a docker-compose file, where the network is a bridge, and this is how I did it:

  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    restart: unless-stopped

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    depends_on:
      - autoserver
    links:
      - "autoserver:server"
I read that to reference one container from another (the client to the server) I have to use the service name from the docker-compose YML (autoserver), which is what I did. In the Python file executed in the client (from pymodbus), I changed 'localhost' to:

host = 'autoserver'
client = ModbusTcpClient(host, port=5020)
But I get this error:

[ERROR/MainProcess] failed to run test successfully
Traceback (most recent call last):
  File "performance.py", line 72, in single_client_test
    client.read_holding_registers(10, 1, unit=1)
  File "/usr/lib/python3/dist-packages/pymodbus/client/common.py", line 114, in read_holding_registers
    return self.execute(request)
  File "/usr/lib/python3/dist-packages/pymodbus/client/sync.py", line 107, in execute
    raise ConnectionException("Failed to connect[%s]" % (self.__str__()))
pymodbus.exceptions.ConnectionException: Modbus Error: [Connection] Failed to connect[ModbusTcpClient(autoserver:5020)]

As requested, my full docker-compose file is:

version: '2.1'

networks:
  monitor-net:
    driver: bridge

volumes:
    prometheus_data: {}
    grafana_data: {}

services:

  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    expose:
      - 9090
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    volumes:
      - ./alertmanager:/etc/alertmanager
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'
    restart: unless-stopped
    expose:
      - 9093
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  nodeexporter:
    image: prom/node-exporter:latest
    container_name: nodeexporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - c:\:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
    expose:
      - 9100
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  cadvisor:
    image: gcr.io/google-containers/cadvisor:latest
    container_name: cadvisor
    volumes:
      - c:\:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
      #- /cgroup:/cgroup:ro #doesn't work on MacOS only for Linux
    restart: unless-stopped
    expose:
      - 8080
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    restart: unless-stopped
    expose:
      - 3000
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  pushgateway:
    image: prom/pushgateway:latest
    container_name: pushgateway
    restart: unless-stopped
    expose:
      - 9091
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  caddy:
    image: stefanprodan/caddy
    container_name: caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9091:9091"
    volumes:
      - ./caddy:/etc/caddy
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    restart: unless-stopped
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    ports:
      - "5020:5020"
    restart: unless-stopped
    networks:
      - monitor-net

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    networks:
      - monitor-net
    depends_on:
      - autoserver
    links:
      - "autoserver:server"

You could try using an env variable like AUTO_SERVER_HOST and reading it in your code:

version: '2.1'

networks:
  monitor-net:
    driver: bridge

volumes:
    prometheus_data: {}
    grafana_data: {}

services:

  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    expose:
      - 9090
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    volumes:
      - ./alertmanager:/etc/alertmanager
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'
    restart: unless-stopped
    expose:
      - 9093
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  nodeexporter:
    image: prom/node-exporter:latest
    container_name: nodeexporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - c:\:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
    expose:
      - 9100
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  cadvisor:
    image: gcr.io/google-containers/cadvisor:latest
    container_name: cadvisor
    volumes:
      - c:\:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
      #- /cgroup:/cgroup:ro #doesn't work on MacOS only for Linux
    restart: unless-stopped
    expose:
      - 8080
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    restart: unless-stopped
    expose:
      - 3000
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  pushgateway:
    image: prom/pushgateway:latest
    container_name: pushgateway
    restart: unless-stopped
    expose:
      - 9091
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  caddy:
    image: stefanprodan/caddy
    container_name: caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9091:9091"
    volumes:
      - ./caddy:/etc/caddy
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    restart: unless-stopped
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    ports:
      - "5020:5020"
    restart: unless-stopped
    networks:
      - monitor-net

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    networks:
      - monitor-net
    depends_on:
      - autoserver
    environment:
      - AUTO_SERVER_HOST=autoserver
Read the env variable like this:

import os

host = os.environ['AUTO_SERVER_HOST']
client = ModbusTcpClient(host, port=5020)
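
A small variation on the same idea, shown here only as a hedged sketch (the fallback value is an assumption, not part of the original answer): using os.environ.get() with a default keeps the script working both under Compose, where AUTO_SERVER_HOST is set, and when the containers are run manually with --network host.

import os
from pymodbus.client.sync import ModbusTcpClient  # pymodbus 2.x sync client, as in the traceback above

# Fall back to a default host when the variable is not set; the fallback
# value 'localhost' is an assumption for the manual --network host case.
host = os.environ.get('AUTO_SERVER_HOST', 'localhost')
client = ModbusTcpClient(host, port=5020)

if not client.connect():
    raise RuntimeError("Could not connect to Modbus server at %s:5020" % host)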

The problem is in the sincserver.py file from the autoserver image, in the call StartTcpServer(context, identity=identity, address=("localhost", 5020)). Binding to localhost makes the TCP server accept connections from localhost only; it should be replaced with 0.0.0.0 so that external requests to that port are accepted.
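
For reference, a minimal sketch of a pymodbus synchronous TCP server bound to 0.0.0.0; the datastore layout and identity fields are assumptions, since sincserver.py itself is not shown here, but the StartTcpServer call follows the same pymodbus sync API that appears above.

from pymodbus.server.sync import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import (ModbusSequentialDataBlock,
                                ModbusSlaveContext, ModbusServerContext)

# 100 holding registers initialised to zero (register layout assumed for illustration)
store = ModbusSlaveContext(hr=ModbusSequentialDataBlock(0, [0] * 100))
context = ModbusServerContext(slaves=store, single=True)

identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'

# Bind to 0.0.0.0 instead of localhost so other containers on the Compose
# bridge network can reach port 5020.
StartTcpServer(context, identity=identity, address=("0.0.0.0", 5020))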

The following docker-compose shows it (sed -i 's|localhost|0.0.0.0|g' sincserver.py replaces the hostname):

version: "2.1"
services:
  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    command: sh -c "
      sed -i 's|localhost|0.0.0.0|g' sincserver.py;
      python3 sincserver.py
      "
    ports:
      - "5020:5020"
    restart: unless-stopped

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    restart: unless-stopped
    depends_on:
      - autoserver
Run:

docker-compose up -d
docker-compose logs -f clientperf

and you will see logs like:

clientperf_1  | [DEBUG/MainProcess] 574 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.7410697999875993 seconds
clientperf_1  | [DEBUG/MainProcess] 692 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.4434449000109453 seconds
clientperf_1  | [DEBUG/MainProcess] 708 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.4116760999895632 seconds
clientperf_1  | [DEBUG/MainProcess] 890 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.1230684999900404 seconds
clientperf_1  | [DEBUG/MainProcess] 803 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.2450218999874778 seconds
clientperf_1  | [DEBUG/MainProcess] 753 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.3274328999978025 seconds
clientperf_1  | [DEBUG/MainProcess] 609 requests/second
clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.6399398999928962 seconds

Did you verify that the autoserver container is running and ready to accept connections?
Yes, it is definitely running.
@19mike95 Can you share the whole docker-compose.yaml file?
I think you don't need the links entry in clientperf. When both services are on the same network, the server is already reachable by its hostname.
@19mike95 That is strange. Have you tried running some DNS troubleshooting commands in the client container, such as nslookup autoserver or dig autoserver?
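
If nslookup or dig are not available in the client image, the same check can be done from Python; this is only a diagnostic sketch, not part of the original thread:

import socket

# Resolve the Compose service name the same way the Modbus client would;
# a socket.gaierror here means the name is not resolvable from this container.
try:
    print("autoserver resolves to", socket.gethostbyname("autoserver"))
except socket.gaierror as exc:
    print("DNS lookup for autoserver failed:", exc)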
ERROR: The Compose file '.\docker-compose.yml' is invalid because: Unsupported config option for services.clientperf. I also tried environment, which is supposed to work in docker-compose 2.1, but I still get the same error.
Is there any specific reason for using docker-compose version '2.1'?
I am using dockprom and there is no version newer than docker-compose 3. I managed to use environment with docker-compose 3, but I still get the same error as always: File "/usr/lib/python3/dist-packages/pymodbus/client/sync.py", line 107, in execute raise ConnectionException("Failed to connect[%s]" % (self.__str__())) pymodbus.exceptions.ConnectionException: Modbus Error: [Connection] Failed to connect[ModbusTcpClient(autoserver:5020)]
Let's debug it. Can you print out os.environ['AUTO_SERVER_HOST'] and see what it gives?
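
A one-liner for that check inside the client container, sketched here for convenience (os.environ.get avoids a KeyError when the variable is missing):

import os

# Prints None instead of raising KeyError when the variable is not set.
print("AUTO_SERVER_HOST =", os.environ.get("AUTO_SERVER_HOST"))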