
Missing WAL files on an async slave node on Postgres 11


I have 3 VM nodes running master-slave Postgres 11. They are managed by Pacemaker.


Node Attributes:
 * Node node04:
    + master-pgsqlins          : 1000
    + pgsqlins-data-status     : LATEST
    + pgsqlins-master-baseline : 000000C0D8000098
    + pgsqlins-status          : PRI
 * Node node05:
    + master-pgsqlins          : -INFINITY
    + pgsqlins-data-status     : STREAMING|ASYNC
    + pgsqlins-status          : HS:async
 * Node node06:
    + master-pgsqlins          : 100
    + pgsqlins-data-status     : STREAMING|SYNC
    + pgsqlins-status          : HS:sync

Sometimes the async slave node throws an error that a required WAL file is missing; it then stops replication and starts it again.
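
For reference, the node-attribute summary above is the kind of one-shot status Pacemaker itself can print; a minimal sketch of the invocation (flag spelling may differ slightly between crm_mon versions):

crm_mon -1 -A    # one-shot cluster status including node attributes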

On the async node, WAL archiving is enabled in postgresql.conf, and the archives get synced to a folder on the master named wal_archive. There is also another process that keeps deleting files from that wal_archive folder. So I understand why the slave node throws that error; what I want to understand is how it is able to start again without the missing file.
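
To make that concrete, here is roughly what the archive_command and restore_command shown below do for the segment named in the error logs. This is only a sketch: the /xxxxx/ archive path is kept redacted as in the original, and PostgreSQL substitutes the real %p/%f values at run time.

# archive_command on the archiving node (%p = path of the segment, %f = its file name)
rsync -a pg_wal/00000001000000C100000097 /xxxxx/wal_archive/00000001000000C100000097

# restore_command on the standby: copy the segment back from the archive to the
# destination PostgreSQL passes as %p (shown here as a typical temporary name)
rsync -a /xxxxx/wal_archive/00000001000000C100000097 pg_wal/RECOVERYXLOG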

Here is the postgresql.conf:

# Connection settings
# -------------------
listen_addresses = '*'
port = 5432
max_connections = 600

tcp_keepalives_idle = 0
tcp_keepalives_interval = 0
tcp_keepalives_count = 0

# Memory-related settings
# -----------------------
shared_buffers = 2GB           # Physical memory 1/4
##DEBUG:  mmap(1652555776) with MAP_HUGETLB failed, huge pages disabled: Cannot allocate memory
#huge_pages = try              # on, off, or try
#temp_buffers = 16MB            # depends on DB checklist
work_mem = 8MB                 # Need tuning
effective_cache_size = 4GB      # Physical memory 1/2
maintenance_work_mem = 512MB
wal_buffers = 64MB

# WAL/Replication/HA   settings
# --------------------
wal_level = logical
synchronous_commit = remote_write
archive_mode = on
archive_command = 'rsync -a %p /xxxxx/wal_archive/%f'
#archive_command = ':'
max_wal_senders=5
hot_standby = on
restart_after_crash = off
wal_sender_timeout = 60000
wal_receiver_status_interval = 2
max_standby_streaming_delay = -1
max_standby_archive_delay = -1
hot_standby_feedback = on
random_page_cost = 1.5

max_wal_size = 5GB
min_wal_size = 200MB
checkpoint_completion_target = 0.9
checkpoint_timeout = 30min

# Logging settings
# ----------------
log_destination = 'csvlog,syslog'
logging_collector = on
log_directory = 'pg_log'
log_filename = 'postgresql_%Y%m%d.log'
log_truncate_on_rotation = off
log_rotation_age = 1h
log_rotation_size = 0

log_timezone = 'Japan'
log_line_prefix = '%t [%p]: [%l-1] %h:%u@%d:[XXXPG]:CODE:%e '

log_statement = ddl
log_min_messages = info         # DEBUG5
log_min_error_statement = info  # DEBUG5
log_error_verbosity = default
log_checkpoints = on
log_lock_waits = on
log_temp_files = 0
log_connections = on
log_disconnections = on
log_duration = off
log_min_duration_statement = 1000
log_autovacuum_min_duration = 3000ms

track_functions = pl
track_activity_query_size = 8192

# Locale/display settings
# -----------------------
lc_messages = 'C'
lc_monetary = 'en_US.UTF-8'  # ja_JP.eucJP
lc_numeric  = 'en_US.UTF-8'  # ja_JP.eucJP
lc_time     = 'en_US.UTF-8'  # ja_JP.eucJP
timezone = 'Asia/Tokyo'
bytea_output = 'escape'


# Auto vacuum settings
# -----------------------
autovacuum = on
autovacuum_max_workers = 3
autovacuum_vacuum_cost_limit = 200
#shared_preload_libraries  = 'pg_stat_statements,auto_explain'   <------------------check this

auto_explain.log_min_duration = 10000
auto_explain.log_analyze = on
include '/var/lib/pgsql/tmp/rep_mode.conf' # added by pgsql RA
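
For diagnosing how far node05 trails the master before these errors appear, the standard pg_stat_replication view can be queried on the primary. A minimal sketch; the view and the WAL functions used here exist in Postgres 11:

psql -x -c "SELECT application_name, state, sync_state,
                   pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lag_bytes
            FROM pg_stat_replication;"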

On the async slave node, this is the recovery.conf:

primary_conninfo = 'host=1xx.xx.xx.xx port=5432 user=replica application_name=node05 keepalives_idle=60 keepalives_interval=5 keepalives_count=5'
restore_command = 'rsync -a /xxxxx/wal_archive/%f %p'
recovery_target_timeline = 'latest'
standby_mode = 'on'
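
On the archive-pruning side, recovery.conf also accepts an archive_cleanup_command, which removes only segments older than the standby's last restartpoint rather than pruning on a fixed timer. A sketch, assuming the same redacted /xxxxx/wal_archive location; pg_archivecleanup is the utility shipped with PostgreSQL:

archive_cleanup_command = 'pg_archivecleanup /xxxxx/wal_archive %r'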

Error log on the master:

2021-07-05 23:35:02.321 JST,28926,60e16b42.70fe,122,2021-07-04 17:03:14 JST,LOG,00000,"checkpoint complete: wrote 2897 buffers (1.1%); 0 WAL file(s) added, 0 removed, 2 recycled; write=106.770 s, sync=0.050 s, total=106.827 s; sync files=251, longest=0.017 s, average=0.001 s; distance=20262 kB, estimate=46658 kB",""
2021-07-05 23:35:02.322 JST,123,"checkpoint starting: immediate force wait",""
2021-07-05 23:35:02.347 JST,124,"checkpoint complete: wrote 173 buffers (0.1%); 0 WAL file(s) added, 1 recycled; write=0.007 s, sync=0.012 s, total=0.026 s; sync files=43, longest=0.005 s, average=0.001 s; distance=14410 kB, estimate=43434 kB",""
2021-07-05 23:35:02.348 JST,"replica","",3451,"1xx.xx.xx.xxx:45120",60e16bfc.d7b,3,"streaming C1/97C3E000",2021-07-04 17:06:20 JST,116/0,ERROR,XX000,"requested WAL segment 00000001000000C100000097 has already been removed","node05"
2021-07-05 23:35:02.361 JST,4,"idle","disconnection: session time: 30:28:41.550 user=replica database= host=172.17.48.141 port=45120","node05"
2021-07-05 23:35:02.399 JST,24896,"1xx.xx.xx.xxx:49278",60e31896.6140,1,2021-07-05 23:35:02 JST,"connection received: host=1xx.xx.xx.xxx port=49278",""
2021-07-05 23:35:02.401 JST,"postgres",24851,"[local]",60e31896.6113,"disconnection: session time: 0:00:00.251 user=postgres database=postgres host=[local]","postgres@node04"
2021-07-05 23:35:02.403 JST,2,"authentication",116/72,"replication connection authorized: user=replica",""

Error log on the async slave node:

2021-07-05 23:35:02.359 JST,2541,60e16bfc.9ed,FATAL,"could not receive data from WAL stream: ERROR: requested WAL segment 00000001000000C100000097 has already been removed",""
2021-07-05 23:35:02.408 JST,4703,60e31896.125f,"started streaming WAL from primary at C1/98000000 on timeline 1",""
2021-07-05 23:35:03.318 JST,4835,60e31897.12e3,2021-07-05 23:35:03 JST,"connection received: host=[local]",""

The sync slave node does not throw this error; only the async slave node does, and it recovers without any manual intervention. Apart from not deleting the archived WAL files from the wal_archive folder every 2 minutes, is there any other way to avoid this error on the async slave node?
