Better managing of replication slots #30

Merged on Jul 31, 2024 (6 commits; changes shown from 1 commit)

src/__init__.py: 3 additions, 0 deletions

@@ -78,6 +78,7 @@ def read_config(filename=None, options=None):
             'ca_cert': None,
             'verify_certs': 'no',
             'drop_slot_countdown': 10,
+            'replication_slots_polling': None,
         },
         'primary': {
             'change_replication_type': 'yes',
@@ -132,6 +133,8 @@ def read_config(filename=None, options=None):
         for key, value in defaults[section].items():
             if not config.has_option(section, key):
                 config.set(section, key, value)
+    if config.get('global', 'replication_slots_polling') is None:
+        config.set('global', 'replication_slots_polling', config.get('global', 'use_replication_slots'))
 
     #
     # Rewriting global config with parameters from command line.
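
The net effect: the new global option replication_slots_polling defaults to None and, when left unset, inherits the value of use_replication_slots, so existing configurations behave exactly as before while the two concerns become independently tunable. A minimal sketch of that fallback, assuming stdlib RawConfigParser semantics (pgconsul's real read_config wraps more machinery than this):

    # Sketch only: illustrates the fallback above, not pgconsul's actual code.
    from configparser import RawConfigParser

    config = RawConfigParser()
    config.add_section('global')
    config.set('global', 'use_replication_slots', 'yes')
    config.set('global', 'replication_slots_polling', None)  # the new default

    # An unset polling flag inherits the legacy flag, as in the diff.
    if config.get('global', 'replication_slots_polling') is None:
        config.set('global', 'replication_slots_polling',
                   config.get('global', 'use_replication_slots'))

    print(config.getboolean('global', 'replication_slots_polling'))  # True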
src/main.py: 3 additions, 3 deletions

@@ -639,7 +639,7 @@ def non_ha_replica_iter(self, db_state, zk_state):
             )
             current_primary = zk_state['lock_holder']
 
-        if streaming_from_primary:
+        if streaming_from_primary and not streaming:
             self._acquire_replication_source_slot_lock(current_primary)
         if streaming:
             self._acquire_replication_source_slot_lock(stream_from)
@@ -1093,7 +1093,7 @@ def _attach_to_primary(self, new_primary, limit):
         return True
 
     def _handle_slots(self):
-        if not self.config.getboolean('global', 'use_replication_slots'):
+        if not self.config.getboolean('global', 'replication_slots_polling'):
             return
 
         my_hostname = helpers.get_hostname()
@@ -1146,7 +1146,7 @@ def _get_db_state(self):
         return state
 
     def _acquire_replication_source_slot_lock(self, source):
-        if not self.config.getboolean('global', 'use_replication_slots'):
+        if not self.config.getboolean('global', 'replication_slots_polling'):
             return
         # We need to drop the slot in the old primary.
         # But we don't know who the primary was (probably there are many of them).
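
Two behavioral changes sit in these hunks. First, _handle_slots and _acquire_replication_source_slot_lock are now gated on replication_slots_polling rather than use_replication_slots. Second, non_ha_replica_iter no longer takes the slot lock for the current primary when the replica is already streaming from an explicit source, which previously could leave one replica holding two slot locks. A simplified sketch of the resulting control flow (the method name and flat signature are hypothetical, condensed from the diff):

    # Hypothetical condensation of the non_ha_replica_iter change; the real
    # method derives these flags from db_state and zk_state.
    def _acquire_source_slot_locks(self, streaming, streaming_from_primary,
                                   current_primary, stream_from):
        # Lock the primary's slot only when not already streaming elsewhere,
        # so at most one slot lock is acquired per iteration.
        if streaming_from_primary and not streaming:
            self._acquire_replication_source_slot_lock(current_primary)
        if streaming:
            self._acquire_replication_source_slot_lock(stream_from)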
tests/features/kill_primary.feature: 2 additions, 2 deletions

@@ -343,7 +343,7 @@ Feature: Destroy primary in various scenarios
       - client_hostname: pgconsul_postgresql3_1.pgconsul_pgconsul_net
         state: streaming
       """
-    When we set value "no" for option "use_replication_slots" in section "global" in pgconsul config in container "postgresql3"
+    When we set value "no" for option "replication_slots_polling" in section "global" in pgconsul config in container "postgresql3"
     And we restart "pgconsul" in container "postgresql3"
     When we stop container "postgresql3"
     And we wait "10.0" seconds
@@ -359,7 +359,7 @@ Feature: Destroy primary in various scenarios
     Then <lock_type> "<lock_host>" has holder "pgconsul_postgresql2_1.pgconsul_pgconsul_net" for lock "/pgconsul/postgresql/leader"
     Then container "postgresql2" became a primary
     Then <lock_type> "<lock_host>" has value "finished" for key "/pgconsul/postgresql/failover_state"
-    When we set value "yes" for option "use_replication_slots" in section "global" in pgconsul config in container "postgresql3"
+    When we set value "yes" for option "replication_slots_polling" in section "global" in pgconsul config in container "postgresql3"
     And we restart "pgconsul" in container "postgresql3"
     Then container "postgresql3" is in quorum group
     Then <lock_type> "<lock_host>" has following values for key "/pgconsul/postgresql/replics_info"
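
The feature now toggles replication_slots_polling instead of use_replication_slots, matching the decoupling above: polling can be switched off on one node while slots stay in use for replication. A self-contained illustration of the two flags diverging (the config text below is invented for this sketch; pgconsul's real config carries many more options):

    # Illustration only: mimics the [global] section the feature file edits.
    from configparser import RawConfigParser
    import textwrap

    conf = textwrap.dedent("""\
        [global]
        use_replication_slots = yes
        replication_slots_polling = no
        """)

    config = RawConfigParser()
    config.read_string(conf)

    # Slots remain in use for replication, but _handle_slots()-style code
    # returns early on this node because polling is disabled.
    print(config.getboolean('global', 'use_replication_slots'))      # True
    print(config.getboolean('global', 'replication_slots_polling'))  # False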