Make the number of replicas modifiable when the cluster is running
neoctl gets a new command to change the number of replicas.

The number of replicas becomes a new partition table attribute and,
like the PT id, it is stored in the config table. On the other side,
the configuration value for the number of partitions is dropped, since
it can be computed from the partition table, which is always stored in
full. The -p/-r master options now only apply at database creation.

Some implementation notes:
- The protocol is slightly optimized in that the master now
  automatically sends the whole partition table to the admin & client
  nodes upon connection, as it already does for storage nodes. This
  makes the protocol more consistent, and the master is the only
  remaining node that requests partition tables (during recovery).
- Some parts become tricky because app.pt can be None in more cases.
  For example, this is the reason the extra condition in
  NodeManager.update (before app.pt.dropNode) was added. Likewise, the
  'loadPartitionTable' method (storage) is not inlined because of unit
  tests. Overall, this commit simplifies more than it complicates.
- In the master handlers, we stop hijacking the 'connectionCompleted'
  method for tasks to be performed on handler switches (often sending
  the full partition table).
- The admin's 'bootstrapped' flag could have been removed earlier:
  race conditions can't happen since the AskNodeInformation packet was
  removed (commit d048a52d).
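As a rough illustration of the persistence side described above, the
sketch below models the config table with SQLite; the schema and the
key names ('ptid', 'replicas') are assumptions for illustration, not
necessarily NEO's actual ones. The replica count is stored like the PT
id, while the partition count needs no row at all because the
partition table is always stored in full:

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute("CREATE TABLE config (name TEXT PRIMARY KEY, value TEXT)")
    # The PT id was already persisted this way; 'replicas' is the new entry.
    db.executemany("INSERT INTO config VALUES (?,?)",
                   [('ptid', '42'), ('replicas', '1')])
    # The number of partitions is never stored: it is implied by the
    # partition table itself.
    partition_table = {0: ['S1'], 1: ['S2'], 2: ['S1']}  # offset -> cells
    assert len(partition_table) == 3  # computed, not configured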
@@ -16,6 +16,7 @@
 import sys
 from collections import defaultdict
+from functools import partial
 from time import time
 from neo.lib import logging, util
@@ -76,13 +77,11 @@ class Application(BaseApplication):
     @classmethod
     def _buildOptionParser(cls):
-        _ = cls.option_parser
-        _.description = "NEO Master node"
+        parser = cls.option_parser
+        parser.description = "NEO Master node"
         cls.addCommonServerOptions('master', '127.0.0.1:10000', '')
 
-        _ = _.group('master')
-        _.int('r', 'replicas', default=0, help="replicas number")
-        _.int('p', 'partitions', default=100, help="partitions number")
+        _ = parser.group('master')
         _.int('A', 'autostart',
             help="minimum number of pending storage nodes to automatically"
                 " start new cluster (to avoid unwanted recreation of the"
@@ -94,6 +93,10 @@ class Application(BaseApplication):
         _.int('i', 'nid',
             help="specify an NID to use for this process (testing purpose)")
 
+        _ = parser.group('database creation')
+        _.int('r', 'replicas', default=0, help="replicas number")
+        _.int('p', 'partitions', default=100, help="partitions number")
+
     def __init__(self, config):
         super(Application, self).__init__(
             config.get('ssl'), config.get('dynamic_master_list'))
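The new option group above makes the scope of -p/-r explicit: they are
only read when the database is created. As a rough analogy (NEO uses
its own option parser, so this argparse version is illustrative only):

    import argparse

    parser = argparse.ArgumentParser(description="NEO Master node")
    group = parser.add_argument_group(
        'database creation', 'only used when the database is first created')
    group.add_argument('-r', '--replicas', type=int, default=0,
                       help="replicas number")
    group.add_argument('-p', '--partitions', type=int, default=100,
                       help="partitions number")
    args = parser.parse_args(['-p', '12', '-r', '1'])
    assert (args.partitions, args.replicas) == (12, 1)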
@@ -117,14 +120,14 @@ class Application(BaseApplication):
         replicas = config['replicas']
         partitions = config['partitions']
         if replicas < 0:
-            raise RuntimeError, 'replicas must be a positive integer'
+            sys.exit('replicas must be a positive integer')
         if partitions <= 0:
-            raise RuntimeError, 'partitions must be more than zero'
-        self.pt = PartitionTable(partitions, replicas)
+            sys.exit('partitions must be more than zero')
         logging.info('Configuration:')
         logging.info('Partitions: %d', partitions)
         logging.info('Replicas  : %d', replicas)
         logging.info('Name      : %s', self.name)
+        self.newPartitionTable = partial(PartitionTable, partitions, replicas)
         self.listening_conn = None
         self.cluster_state = None
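Storing a factory instead of an instance is the key change here: the
master no longer builds a partition table at startup, so app.pt may
legitimately be None until recovery either loads an existing table or
creates a fresh one. A minimal sketch of the pattern, with
PartitionTable stubbed out:

    from functools import partial

    class PartitionTable(object):
        def __init__(self, partitions, replicas):
            self.np, self.nr = partitions, replicas

    # Bind the creation-time configuration without instantiating anything.
    newPartitionTable = partial(PartitionTable, 100, 0)

    pt = None             # app.pt can now be None in more code paths
    # ... later, e.g. during recovery, if no table could be loaded:
    pt = newPartitionTable()
    assert (pt.np, pt.nr) == (100, 0)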
@@ -212,17 +215,23 @@ class Application(BaseApplication):
         if node_list:
             node.send(Packets.NotifyNodeInformation(now, node_list))
 
-    def broadcastPartitionChanges(self, cell_list):
+    def broadcastPartitionChanges(self, cell_list, num_replicas=None):
         """Broadcast a Notify Partition Changes packet."""
-        if cell_list:
-            ptid = self.pt.setNextID()
-            self.pt.logUpdated()
-            packet = Packets.NotifyPartitionChanges(ptid, cell_list)
-            for node in self.nm.getIdentifiedList():
-                # As for broadcastNodesInformation, we don't send the full PT
-                # when pending storage nodes are added, so keep them notified.
-                if not node.isMaster():
-                    node.send(packet)
+        pt = self.pt
+        if num_replicas is not None:
+            pt.setReplicas(num_replicas)
+        elif cell_list:
+            num_replicas = pt.getReplicas()
+        else:
+            return
+        packet = Packets.NotifyPartitionChanges(
+            pt.setNextID(), num_replicas, cell_list)
+        pt.logUpdated()
+        for node in self.nm.getIdentifiedList():
+            # As for broadcastNodesInformation, we don't send the full PT
+            # when pending storage nodes are added, so keep them notified.
+            if not node.isMaster():
+                node.send(packet)
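The rewritten method now distinguishes three cases: a replicas change
(possibly with an empty cell_list), a plain cell update (which re-sends
the current replica count), and nothing to do. A self-contained model
of this dispatch, with the packet reduced to a tuple and method names
mirroring the diff:

    class PT(object):
        def __init__(self):
            self._id = 0
            self._replicas = 0
        def setReplicas(self, n):
            self._replicas = n
        def getReplicas(self):
            return self._replicas
        def setNextID(self):
            self._id += 1
            return self._id

    def broadcastPartitionChanges(pt, cell_list, num_replicas=None):
        if num_replicas is not None:
            pt.setReplicas(num_replicas)    # replicas changed via neoctl
        elif cell_list:
            num_replicas = pt.getReplicas() # cell update: repeat current value
        else:
            return None                     # nothing to broadcast
        return ('NotifyPartitionChanges',
                pt.setNextID(), num_replicas, cell_list)

    pt = PT()
    assert broadcastPartitionChanges(pt, ()) is None          # no-op
    assert broadcastPartitionChanges(pt, (), 2)[2] == 2       # replicas only
    assert broadcastPartitionChanges(pt, [(0, 1, 'UP')])[2] == 2  # cells only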
     def provideService(self):
         """
@@ -437,16 +446,7 @@ class Application(BaseApplication):
                 conn.send(notification_packet)
             elif conn.isServer():
                 continue
-            if node.isClient():
-                if state == ClusterStates.RUNNING:
-                    handler = self.client_service_handler
-                elif state == ClusterStates.BACKINGUP:
-                    handler = self.client_ro_service_handler
-                else:
-                    if state != ClusterStates.STOPPING:
-                        conn.abort()
-                    continue
-            elif node.isMaster():
+            if node.isMaster():
                 if state == ClusterStates.RECOVERING:
                     handler = self.election_handler
                 else:
@@ -454,10 +454,16 @@ class Application(BaseApplication):
             elif node.isStorage() and storage_handler:
                 handler = storage_handler
             else:
+                # There's a single handler type for admins.
+                # Client can't change handler without being first disconnected.
+                assert state in (
+                    ClusterStates.STOPPING,
+                    ClusterStates.STOPPING_BACKUP,
+                ) or not node.isClient(), (state, node)
                 continue # keep handler
             if type(handler) is not type(conn.getLastHandler()):
                 conn.setHandler(handler)
-                handler.connectionCompleted(conn, new=False)
+                handler.handlerSwitched(conn, new=False)
             self.cluster_state = state
 
     def getNewUUID(self, uuid, address, node_type):
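Finally, the 'handlerSwitched' rename above turns an abused callback
into an explicit hook: handlers are no longer notified of a switch
through 'connectionCompleted', but through a method whose only job is
to run the per-handler tasks (often sending the full partition table).
A minimal sketch of the pattern; the classes are illustrative, not
NEO's actual ones:

    class EventHandler(object):
        def handlerSwitched(self, conn, new):
            # Called when this handler takes over 'conn'; new=False means
            # an in-place switch on a cluster state change, as in the diff
            # above. Default: nothing to do.
            pass

    class StorageServiceHandler(EventHandler):
        def handlerSwitched(self, conn, new):
            # Typical per-handler task on a switch.
            conn.send('full partition table (placeholder)')

    class DummyConn(object):
        def send(self, packet):
            print(packet)

    StorageServiceHandler().handlerSwitched(DummyConn(), new=False)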
... | ... |