Make the number of replicas modifiable when the cluster is running
neoctl gets a new command to change the number of replicas. The number of replicas becomes a new partition table attribute and, like the PT id, it is stored in the config table. On the other side, the configuration value for the number of partitions is dropped, since it can be computed from the partition table, which is always stored in full. The -p/-r master options now only apply at database creation. Some implementation notes: - The protocol is slightly optimized in that the master now automatically sends the whole partition table to the admin & client nodes upon connection, like for storage nodes. This makes the protocol more consistent, and the master is the only remaining node requesting partition tables, which it does during recovery. - Some parts become tricky because app.pt can be None in more cases. For example, the extra condition in NodeManager.update (before app.pt.dropNode) was added for this reason. Another example is the 'loadPartitionTable' method (storage), which is not inlined because of unit tests. Overall, this commit simplifies more than it complicates. - In the master handlers, we stop hijacking the 'connectionCompleted' method for tasks to be performed (often sending the full partition table) on handler switches. - The admin's 'bootstrapped' flag could have been removed earlier: race conditions can't happen since the AskNodeInformation packet was removed (commit d048a52d).
Showing
... | @@ -16,6 +16,7 @@ | ... | @@ -16,6 +16,7 @@ |
import sys | import sys | ||
from collections import defaultdict | from collections import defaultdict | ||
from functools import partial | |||
from time import time | from time import time | ||
from neo.lib import logging, util | from neo.lib import logging, util | ||
... | @@ -76,13 +77,11 @@ class Application(BaseApplication): | ... | @@ -76,13 +77,11 @@ class Application(BaseApplication): |
@classmethod | @classmethod | ||
def _buildOptionParser(cls): | def _buildOptionParser(cls): | ||
_ = cls.option_parser | parser = cls.option_parser | ||
_.description = "NEO Master node" | parser.description = "NEO Master node" | ||
cls.addCommonServerOptions('master', '127.0.0.1:10000', '') | cls.addCommonServerOptions('master', '127.0.0.1:10000', '') | ||
_ = _.group('master') | _ = parser.group('master') | ||
_.int('r', 'replicas', default=0, help="replicas number") | |||
_.int('p', 'partitions', default=100, help="partitions number") | |||
_.int('A', 'autostart', | _.int('A', 'autostart', | ||
help="minimum number of pending storage nodes to automatically" | help="minimum number of pending storage nodes to automatically" | ||
" start new cluster (to avoid unwanted recreation of the" | " start new cluster (to avoid unwanted recreation of the" | ||
... | @@ -94,6 +93,10 @@ class Application(BaseApplication): | ... | @@ -94,6 +93,10 @@ class Application(BaseApplication): |
_.int('i', 'nid', | _.int('i', 'nid', | ||
help="specify an NID to use for this process (testing purpose)") | help="specify an NID to use for this process (testing purpose)") | ||
_ = parser.group('database creation') | |||
_.int('r', 'replicas', default=0, help="replicas number") | |||
_.int('p', 'partitions', default=100, help="partitions number") | |||
def __init__(self, config): | def __init__(self, config): | ||
super(Application, self).__init__( | super(Application, self).__init__( | ||
config.get('ssl'), config.get('dynamic_master_list')) | config.get('ssl'), config.get('dynamic_master_list')) | ||
... | @@ -117,14 +120,14 @@ class Application(BaseApplication): | ... | @@ -117,14 +120,14 @@ class Application(BaseApplication): |
replicas = config['replicas'] | replicas = config['replicas'] | ||
partitions = config['partitions'] | partitions = config['partitions'] | ||
if replicas < 0: | if replicas < 0: | ||
raise RuntimeError, 'replicas must be a positive integer' | sys.exit('replicas must be a positive integer') | ||
if partitions <= 0: | if partitions <= 0: | ||
raise RuntimeError, 'partitions must be more than zero' | sys.exit('partitions must be more than zero') | ||
self.pt = PartitionTable(partitions, replicas) | |||
logging.info('Configuration:') | logging.info('Configuration:') | ||
logging.info('Partitions: %d', partitions) | logging.info('Partitions: %d', partitions) | ||
logging.info('Replicas : %d', replicas) | logging.info('Replicas : %d', replicas) | ||
logging.info('Name : %s', self.name) | logging.info('Name : %s', self.name) | ||
self.newPartitionTable = partial(PartitionTable, partitions, replicas) | |||
self.listening_conn = None | self.listening_conn = None | ||
self.cluster_state = None | self.cluster_state = None | ||
... | @@ -212,17 +215,23 @@ class Application(BaseApplication): | ... | @@ -212,17 +215,23 @@ class Application(BaseApplication): |
if node_list: | if node_list: | ||
node.send(Packets.NotifyNodeInformation(now, node_list)) | node.send(Packets.NotifyNodeInformation(now, node_list)) | ||
def broadcastPartitionChanges(self, cell_list): | def broadcastPartitionChanges(self, cell_list, num_replicas=None): | ||
"""Broadcast a Notify Partition Changes packet.""" | """Broadcast a Notify Partition Changes packet.""" | ||
if cell_list: | pt = self.pt | ||
ptid = self.pt.setNextID() | if num_replicas is not None: | ||
self.pt.logUpdated() | pt.setReplicas(num_replicas) | ||
|
|||
packet = Packets.NotifyPartitionChanges(ptid, cell_list) | elif cell_list: | ||
for node in self.nm.getIdentifiedList(): | num_replicas = pt.getReplicas() | ||
# As for broadcastNodesInformation, we don't send the full PT | else: | ||
# when pending storage nodes are added, so keep them notified. | return | ||
if not node.isMaster(): | packet = Packets.NotifyPartitionChanges( | ||
node.send(packet) | pt.setNextID(), num_replicas, cell_list) | ||
pt.logUpdated() | |||
for node in self.nm.getIdentifiedList(): | |||
# As for broadcastNodesInformation, we don't send the full PT | |||
# when pending storage nodes are added, so keep them notified. | |||
if not node.isMaster(): | |||
node.send(packet) | |||
def provideService(self): | def provideService(self): | ||
""" | """ | ||
... | @@ -437,16 +446,7 @@ class Application(BaseApplication): | ... | @@ -437,16 +446,7 @@ class Application(BaseApplication): |
conn.send(notification_packet) | conn.send(notification_packet) | ||
elif conn.isServer(): | elif conn.isServer(): | ||
continue | continue | ||
if node.isClient(): | if node.isMaster(): | ||
if state == ClusterStates.RUNNING: | |||
handler = self.client_service_handler | |||
elif state == ClusterStates.BACKINGUP: | |||
handler = self.client_ro_service_handler | |||
else: | |||
if state != ClusterStates.STOPPING: | |||
conn.abort() | |||
continue | |||
elif node.isMaster(): | |||
if state == ClusterStates.RECOVERING: | if state == ClusterStates.RECOVERING: | ||
handler = self.election_handler | handler = self.election_handler | ||
else: | else: | ||
... | @@ -454,10 +454,16 @@ class Application(BaseApplication): | ... | @@ -454,10 +454,16 @@ class Application(BaseApplication): |
elif node.isStorage() and storage_handler: | elif node.isStorage() and storage_handler: | ||
handler = storage_handler | handler = storage_handler | ||
else: | else: | ||
# There's a single handler type for admins. | |||
# Client can't change handler without being first disconnected. | |||
assert state in ( | |||
ClusterStates.STOPPING, | |||
ClusterStates.STOPPING_BACKUP, | |||
) or not node.isClient(), (state, node) | |||
continue # keep handler | continue # keep handler | ||
if type(handler) is not type(conn.getLastHandler()): | if type(handler) is not type(conn.getLastHandler()): | ||
conn.setHandler(handler) | conn.setHandler(handler) | ||
handler.connectionCompleted(conn, new=False) | handler.handlerSwitched(conn, new=False) | ||
self.cluster_state = state | self.cluster_state = state | ||
def getNewUUID(self, uuid, address, node_type): | def getNewUUID(self, uuid, address, node_type): | ||
... | ... |