diff --git a/ci/master.py b/ci/master.py index ab7bb5e..eb011c9 100644 --- a/ci/master.py +++ b/ci/master.py @@ -2,7 +2,7 @@ from buildbot.schedulers.triggerable import Triggerable from buildbot.changes.filter import ChangeFilter from buildbot.process.factory import BuildFactory -from buildbot.steps.master import MasterShellCommand +from buildbot.steps.master import MasterShellCommand from buildscripts import steps as buildsteps @@ -30,11 +30,11 @@ def push_to_github(__opts__): cwd = 'sandboxes/{0}/public'.format(project) return [ - MasterShellCommand( + MasterShellCommand( command=""" cd sandboxes/{0}/public - git pull --rebase private master - git push origin master""".format(project), + git pull --rebase private main + git push origin main""".format(project), description='Pushing commit to GitHub', descriptionDone='Push commit to GitHub (trunk)'), ] @@ -42,7 +42,7 @@ def push_to_github(__opts__): c['builders'].append(dict( name='{0} source'.format(project), - slavenames=['ubuntu1204'], + slavenames=['ubuntu1204'], factory=BuildFactory(steps= #buildsteps.svn(__opts__) + #buildsteps.bump_version(__opts__, setter='cat > src/scalarizr/version') + diff --git a/scripts/amiscripts-to-scalarizr.py b/scripts/amiscripts-to-scalarizr.py index 0ebc3ad..b98f897 100644 --- a/scripts/amiscripts-to-scalarizr.py +++ b/scripts/amiscripts-to-scalarizr.py @@ -23,7 +23,7 @@ host_conf = None farm_conf = None mysql_conf = None -master_conf = None +main_conf = None behaviour = None role_name = None snmp_community_name = None @@ -60,8 +60,8 @@ def main(): snmp_community_name = farm_conf['ACCESS_HASH'] if behaviour == 'mysql': - if host_conf['MYSQL_ROLE'] != 'master': - raise MigrationException('MySQL migration available only for replication master') + if host_conf['MYSQL_ROLE'] != 'master': + raise MigrationException('MySQL migration available only for replication master') mysql_conf = parse_amiscripts_config('/etc/aws/mysql.conf') mysql_storage = mysql_conf['MYSQL_STORAGE'] if 
mysql_storage == 'ebs': @@ -282,7 +282,7 @@ def configure_scalarizr_mysql_ebs(): logger.info('Regenerating Scalr mysql system users') passwds = hdlr._add_mysql_users('scalr', 'scalr_repl', 'scalr_stat') mysql_hdlr_conf = {'mysql' : { - 'replication_master' : '1', + 'replication_main' : '1', 'root_password' : passwds[0], 'repl_password' : passwds[1], 'stat_password' : passwds[2] diff --git a/scripts/update_win.py b/scripts/update_win.py index 1efe229..5dba0dc 100644 --- a/scripts/update_win.py +++ b/scripts/update_win.py @@ -56,8 +56,8 @@ def SvcDoRun(self): branch = branch.replace('/','-').replace('.','').strip() except: e = sys.exc_info()[1] - logger.debug('Could not obtain userdata: %s. Using master branch' % e) - branch = 'master' + logger.debug('Could not obtain userdata: %s. Using main branch' % e) + branch = 'main' logger.info('Detecting architecture') arch = platform.uname()[4] diff --git a/src/scalarizr/adm/commands/queryenv.py b/src/scalarizr/adm/commands/queryenv.py index 9c6f3a9..9ae8884 100755 --- a/src/scalarizr/adm/commands/queryenv.py +++ b/src/scalarizr/adm/commands/queryenv.py @@ -121,7 +121,7 @@ def _display_list_roles(self, out): 'index', 'internal-ip', 'external-ip', - 'replication-master'] + 'replication-main'] table_data = [] for d in out: behaviour = ', '.join(d.behaviour) @@ -132,7 +132,7 @@ def _display_list_roles(self, out): str(host.index), host.internal_ip, host.external_ip, - str(host.replication_master)]) + str(host.replication_main)]) print make_table(table_data, headers) def _display_list_virtual_hosts(self, out): diff --git a/src/scalarizr/api/mysql.py b/src/scalarizr/api/mysql.py index fb8c997..bc0f633 100755 --- a/src/scalarizr/api/mysql.py +++ b/src/scalarizr/api/mysql.py @@ -180,23 +180,23 @@ def _check_empty(self, param, name): @rpc.command_method def reset_password(self, new_password=None): """ - Reset password for MySQL user 'scalr_master'. Return new password + Reset password for MySQL user 'scalr_main'. 
Return new password """ if not new_password: new_password = pwgen(20) mysql_cli = mysql_svc.MySQLClient(__mysql__['root_user'], __mysql__['root_password']) - master_user = __mysql__['master_user'] + main_user = __mysql__['main_user'] - if mysql_cli.user_exists(master_user, 'localhost'): - mysql_cli.set_user_password(master_user, 'localhost', new_password) + if mysql_cli.user_exists(main_user, 'localhost'): + mysql_cli.set_user_password(main_user, 'localhost', new_password) else: - mysql_cli.create_user(master_user, 'localhost', new_password) + mysql_cli.create_user(main_user, 'localhost', new_password) - if mysql_cli.user_exists(master_user, '%'): - mysql_cli.set_user_password(master_user, '%', new_password) + if mysql_cli.user_exists(main_user, '%'): + mysql_cli.set_user_password(main_user, '%', new_password) else: - mysql_cli.create_user(master_user, '%', new_password) + mysql_cli.create_user(main_user, '%', new_password) mysql_cli.flush_privileges() @@ -212,23 +212,23 @@ def replication_status(self): """ mysql_cli = mysql_svc.MySQLClient(__mysql__['root_user'], __mysql__['root_password']) - if int(__mysql__['replication_master']): - master_status = mysql_cli.master_status() - result = {'master': {'status': 'up', - 'log_file': master_status[0], - 'log_pos': master_status[1]}} + if int(__mysql__['replication_main']): + main_status = mysql_cli.main_status() + result = {'main': {'status': 'up', + 'log_file': main_status[0], + 'log_pos': main_status[1]}} return result else: try: - slave_status = mysql_cli.slave_status() - slave_status = dict(zip(map(string.lower, slave_status.keys()), - slave_status.values())) - slave_running = slave_status['slave_io_running'] == 'Yes' and \ - slave_status['slave_sql_running'] == 'Yes' - slave_status['status'] = 'up' if slave_running else 'down' - return {'slave': slave_status} + subordinate_status = mysql_cli.subordinate_status() + subordinate_status = dict(zip(map(string.lower, subordinate_status.keys()), + 
subordinate_status.values())) + subordinate_running = subordinate_status['subordinate_io_running'] == 'Yes' and \ + subordinate_status['subordinate_sql_running'] == 'Yes' + subordinate_status['status'] = 'up' if subordinate_running else 'down' + return {'subordinate': subordinate_status} except ServiceError: - return {'slave': {'status': 'down'}} + return {'subordinate': {'status': 'down'}} @rpc.command_method @@ -240,7 +240,7 @@ def do_backup(op, backup_conf=None): try: purpose = '{0}-{1}'.format( __mysql__.behavior, - 'master' if int(__mysql__.replication_master) == 1 else 'slave') + 'main' if int(__mysql__.replication_main) == 1 else 'subordinate') backup = { 'type': 'mysqldump', 'cloudfs_dir': __node__.platform.scalrfs.backups('mysql'), diff --git a/src/scalarizr/api/postgresql.py b/src/scalarizr/api/postgresql.py index d27a4b6..1b1b03e 100755 --- a/src/scalarizr/api/postgresql.py +++ b/src/scalarizr/api/postgresql.py @@ -136,7 +136,7 @@ def get_service_status(self): @rpc.command_method def reset_password(self, new_password=None): """ - Resets password for PostgreSQL user 'scalr_master'. + Resets password for PostgreSQL user 'scalr_main'. 
:returns: New password :rtype: str @@ -144,12 +144,12 @@ def reset_password(self, new_password=None): if not new_password: new_password = pwgen(10) pg = postgresql_svc.PostgreSql() - if pg.master_user.exists(): - pg.master_user.change_role_password(new_password) - pg.master_user.change_system_password(new_password) + if pg.main_user.exists(): + pg.main_user.change_role_password(new_password) + pg.main_user.change_system_password(new_password) else: - pg.create_linux_user(pg.master_user.name, new_password) - pg.create_pg_role(pg.master_user.name, + pg.create_linux_user(pg.main_user.name, new_password) + pg.create_pg_role(pg.main_user.name, new_password, super=True, force=False) @@ -196,17 +196,17 @@ def replication_status(self): Examples:: - On master: + On main: - {'master': {'status': 'up'}} + {'main': {'status': 'up'}} - On broken slave: + On broken subordinate: - {'slave': {'status': 'down','error': }} + {'subordinate': {'status': 'down','error': }} - On normal slave: + On normal subordinate: - {'slave': {'status': 'up', 'xlog_delay': }} + {'subordinate': {'status': 'up', 'xlog_delay': }} """ psql = postgresql_svc.PSQL() @@ -219,14 +219,14 @@ def replication_status(self): raise e query_result = self._parse_query_out(query_out) - is_master = int(__postgresql__[OPT_REPLICATION_MASTER]) + is_main = int(__postgresql__[OPT_REPLICATION_MASTER]) if query_result['xlog_delay'] is None: - if is_master: - return {'master': {'status': 'up'}} - return {'slave': {'status': 'down', + if is_main: + return {'main': {'status': 'up'}} + return {'subordinate': {'status': 'down', 'error': query_result['error']}} - return {'slave': {'status': 'up', + return {'subordinate': {'status': 'up', 'xlog_delay': query_result['xlog_delay']}} @@ -321,7 +321,7 @@ def _single_backup(db_name): cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR) - suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave' + suffix = 'main' if 
int(__postgresql__[OPT_REPLICATION_MASTER]) else 'subordinate' backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix} LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags)) diff --git a/src/scalarizr/api/rabbitmq.py b/src/scalarizr/api/rabbitmq.py index 12ba03f..899ff36 100755 --- a/src/scalarizr/api/rabbitmq.py +++ b/src/scalarizr/api/rabbitmq.py @@ -93,7 +93,7 @@ def get_service_status(self): @rpc.command_method def reset_password(self, new_password=None): """ - Resets password for RabbitMQ user 'scalr_master'. + Resets password for RabbitMQ user 'scalr_main'. :param new_password: New password. If not provided, 10-character string will be generated. :type new_password: str @@ -104,7 +104,7 @@ def reset_password(self, new_password=None): """ if not new_password: new_password = pwgen(10) - self.rabbitmq.check_master_user(new_password) + self.rabbitmq.check_main_user(new_password) return new_password @classmethod diff --git a/src/scalarizr/api/redis.py b/src/scalarizr/api/redis.py index 7c402a5..eed44d3 100755 --- a/src/scalarizr/api/redis.py +++ b/src/scalarizr/api/redis.py @@ -149,9 +149,9 @@ def launch_processes(self, num=None, ports=None, passwords=None, async=False): raise AssertionError('Number of ports must be equal to number of passwords') if num and ports and num != len(ports): raise AssertionError('When ports range is passed its length must be equal to num parameter') - if not __redis__["replication_master"]: + if not __redis__["replication_main"]: if not passwords or not ports: - raise AssertionError('ports and passwords are required to launch processes on redis slave') + raise AssertionError('ports and passwords are required to launch processes on redis subordinate') available_ports = self.available_ports if num > len(available_ports): raise AssertionError('Cannot launch %s new processes: Ports available: %s' % (num, str(available_ports))) @@ -198,7 +198,7 @@ def _launch(self, ports=None, passwords=None, op=None): for 
port, password in zip(ports, passwords or [None for port in ports]): log.info('Launch Redis %s on port %s', - 'Master' if __redis__["replication_master"] else 'Slave', port) + 'Main' if __redis__["replication_main"] else 'Subordinate', port) if iptables.enabled(): iptables.FIREWALL.ensure({ @@ -209,10 +209,10 @@ def _launch(self, ports=None, passwords=None, op=None): redis_process = redis_service.Redis(port, password) if not redis_process.service.running: - if __redis__["replication_master"]: - current_password = redis_process.init_master(STORAGE_PATH) + if __redis__["replication_main"]: + current_password = redis_process.init_main(STORAGE_PATH) else: - current_password = redis_process.init_slave(STORAGE_PATH, primary_ip, port) + current_password = redis_process.init_subordinate(STORAGE_PATH, primary_ip, port) new_passwords.append(current_password) new_ports.append(port) log.debug('Redis process has been launched on port %s with password %s' % (port, current_password)) @@ -227,7 +227,7 @@ def _shutdown(self, ports, remove_data=False, op=None): freed_ports = [] for port in ports: log.info('Shutdown Redis %s on port %s' % ( - 'Master' if __redis__["replication_master"] else 'Slave', port)) + 'Main' if __redis__["replication_main"] else 'Subordinate', port)) instance = redis_service.Redis(port=port) if instance.service.running: @@ -303,18 +303,18 @@ def persistence_type(self): return __redis__["persistence_type"] def get_primary_ip(self): - master_host = None - LOG.info("Requesting master server") - while not master_host: + main_host = None + LOG.info("Requesting main server") + while not main_host: try: - master_host = list( + main_host = list( host for host in self._queryenv.list_roles(behaviour=BEHAVIOUR)[0].hosts - if host.replication_master)[0] + if host.replication_main)[0] except IndexError: - LOG.debug("QueryEnv responded with no %s master. " % BEHAVIOUR + + LOG.debug("QueryEnv responded with no %s main. 
" % BEHAVIOUR + "Waiting %d seconds before the next attempt" % 5) time.sleep(5) - host = master_host.internal_ip or master_host.external_ip + host = main_host.internal_ip or main_host.external_ip LOG.debug('primary IP: %s', host) return host @@ -327,14 +327,14 @@ def reset_password(self, port=__redis__['defaults']['port'], new_password=None): redis_conf = redis_service.RedisConf.find(port=port) redis_conf.requirepass = new_password - if redis_conf.slaveof: - redis_conf.masterauth = new_password + if redis_conf.subordinateof: + redis_conf.mainauth = new_password redis_wrapper = redis_service.Redis(port=port, password=new_password) redis_wrapper.service.reload() if int(port) == __redis__['defaults']['port']: - __redis__["master_password"] = new_password + __redis__["main_password"] = new_password return new_password @@ -348,23 +348,23 @@ def replication_status(self): """ ri = redis_service.RedisInstances() - if __redis__["replication_master"]: - masters = {} + if __redis__["replication_main"]: + mains = {} for port in ri.ports: - masters[port] = {'status': 'up'} - return {'masters': masters} + mains[port] = {'status': 'up'} + return {'mains': mains} - slaves = {} + subordinates = {} for redis_process in ri.instances: repl_data = {} for key, val in redis_process.redis_cli.info.items(): - if key.startswith('master'): + if key.startswith('main'): repl_data[key] = val - if 'master_link_status' in repl_data: - repl_data['status'] = repl_data['master_link_status'] - slaves[redis_process.port] = repl_data + if 'main_link_status' in repl_data: + repl_data['status'] = repl_data['main_link_status'] + subordinates[redis_process.port] = repl_data - return {'slaves': slaves} + return {'subordinates': subordinates} @rpc.command_method def create_databundle(self, async=True): diff --git a/src/scalarizr/handlers/__init__.py b/src/scalarizr/handlers/__init__.py index ad7e215..7293705 100755 --- a/src/scalarizr/handlers/__init__.py +++ b/src/scalarizr/handlers/__init__.py @@ -570,7 
+570,7 @@ class DbMsrMessages: @ivar: status: Operation status [ ok | error ] @ivar: last_error: errmsg if status = error @ivar: snapshot_config: snapshot configuration - @ivar: current_xlog_location: pg_current_xlog_location() on master after snap was created + @ivar: current_xlog_location: pg_current_xlog_location() on main after snap was created ''' DBMSR_CREATE_BACKUP = "DbMsr_CreateBackup" @@ -584,28 +584,28 @@ class DbMsrMessages: @ivar: backup_parts: URL List (s3, cloudfiles) ''' - DBMSR_PROMOTE_TO_MASTER = "DbMsr_PromoteToMaster" + DBMSR_PROMOTE_TO_MASTER = "DbMsr_PromoteToMain" - DBMSR_PROMOTE_TO_MASTER_RESULT = "DbMsr_PromoteToMasterResult" + DBMSR_PROMOTE_TO_MASTER_RESULT = "DbMsr_PromoteToMainResult" ''' @ivar: db_type: postgresql|mysql @ivar: status: ok|error @ivar: last_error: errmsg if status=error @ivar: volume_config: volume configuration @ivar: snapshot_config?: snapshot configuration - @ivar: current_xlog_location_?: pg_current_xlog_location() on master after snap was created + @ivar: current_xlog_location_?: pg_current_xlog_location() on main after snap was created ''' - DBMSR_NEW_MASTER_UP = "DbMsr_NewMasterUp" + DBMSR_NEW_MASTER_UP = "DbMsr_NewMainUp" ''' @ivar: db_type: postgresql|mysql @ivar: local_ip @ivar: remote_ip @ivar: snapshot_config - @ivar: current_xlog_location: pg_current_xlog_location() on master after snap was created + @ivar: current_xlog_location: pg_current_xlog_location() on main after snap was created ''' - DBMSR_NEW_MASTER_UP_RESULT = "DbMsr_NewMasterUpResult" + DBMSR_NEW_MASTER_UP_RESULT = "DbMsr_NewMainUpResult" """ Also Postgresql behaviour adds params to common messages: @@ -613,27 +613,27 @@ class DbMsrMessages: = HOST_INIT_RESPONSE = @ivar db_type: postgresql|mysql @ivar postgresql=dict( - replication_master: 1|0 + replication_main: 1|0 root_user - root_password: 'scalr' user password (on slave) + root_password: 'scalr' user password (on subordinate) root_ssh_private_key root_ssh_public_key current_xlog_location - 
volume_config: Master storage configuration (on master) - snapshot_config: Master storage snapshot (both) + volume_config: Main storage configuration (on main) + snapshot_config: Main storage snapshot (both) ) = HOST_UP = @ivar db_type: postgresql|mysql @ivar postgresql=dict( - replication_master: 1|0 + replication_main: 1|0 root_user - root_password: 'scalr' user password (on master) + root_password: 'scalr' user password (on main) root_ssh_private_key root_ssh_public_key current_xlog_location volume_config: Current storage configuration (both) - snapshot_config: Master storage snapshot (on master) + snapshot_config: Main storage snapshot (on main) ) """ diff --git a/src/scalarizr/handlers/ip_list_builder.py b/src/scalarizr/handlers/ip_list_builder.py index 47a0e0f..0f32381 100755 --- a/src/scalarizr/handlers/ip_list_builder.py +++ b/src/scalarizr/handlers/ip_list_builder.py @@ -53,7 +53,7 @@ def accept(self, message, queue, behaviour=None, platform=None, os=None, dist=No or message.name == Messages.BEFORE_HOST_TERMINATE \ or message.name == Messages.REBOOT_START \ or message.name == Messages.REBOOT_FINISH \ - or message.name == 'Mysql_NewMasterUp' + or message.name == 'Mysql_NewMainUp' def on_start(self, *args): cnf = bus.cnf @@ -81,7 +81,7 @@ def _rebuild(self): role.behaviour, ipaddr, modfn=self._create_file, - replication_master=host.replication_master) + replication_main=host.replication_main) def on_HostUp(self, message): behaviour = message.behaviour @@ -96,7 +96,7 @@ def on_HostUp(self, message): rolename, behaviour, ip) self._modify_tree(rolename, behaviour, ip, modfn=self._create_file, - replication_master='mysql' in behaviour and self._host_is_replication_master(ip, 'mysql')) + replication_main='mysql' in behaviour and self._host_is_replication_main(ip, 'mysql')) def on_HostDown(self, message): behaviour = message.behaviour @@ -111,33 +111,33 @@ def on_HostDown(self, message): rolename, behaviour, ip) self._modify_tree(rolename, behaviour, ip, 
modfn=self._remove_file, - replication_master='mysql' in behaviour and self._host_is_replication_master(ip, 'mysql')) + replication_main='mysql' in behaviour and self._host_is_replication_main(ip, 'mysql')) on_BeforeHostTerminate = on_HostDown - def on_Mysql_NewMasterUp(self, message): + def on_Mysql_NewMainUp(self, message): ip = message.local_ip or message.remote_ip if ip: - self._remove_file(os.path.join(self._base_path, 'mysql-slave', ip)) + self._remove_file(os.path.join(self._base_path, 'mysql-subordinate', ip)) - master_path = os.path.join(self._base_path, 'mysql-master') - if os.path.exists(master_path): - shutil.rmtree(master_path) - self._create_dir(master_path) - self._create_file(os.path.join(master_path, ip)) + main_path = os.path.join(self._base_path, 'mysql-main') + if os.path.exists(main_path): + shutil.rmtree(main_path) + self._create_dir(main_path) + self._create_file(os.path.join(main_path, ip)) on_RebootStart = on_HostDown on_RebootFinish = on_HostUp - def _modify_tree(self, rolename, behaviours, ip, modfn=None, replication_master=None): + def _modify_tree(self, rolename, behaviours, ip, modfn=None, replication_main=None): # Touch/Unlink %role_name%/xx.xx.xx.xx modfn(os.path.join(self._base_path, rolename, ip)) for behaviour in behaviours: if behaviour == BuiltinBehaviours.MYSQL: - suffix = "master" if replication_master else "slave" - # Touch/Unlink mysql-(master|slave)/xx.xx.xx.xx + suffix = "main" if replication_main else "subordinate" + # Touch/Unlink mysql-(main|subordinate)/xx.xx.xx.xx mysql_path = os.path.join(self._base_path, "mysql-" + suffix) modfn(os.path.join(mysql_path, ip)) else: @@ -178,7 +178,7 @@ def _remove_file(self, f): self._logger.error(x) self._remove_dir(os.path.dirname(f)) - def _host_is_replication_master(self, ip, behaviour): + def _host_is_replication_main(self, ip, behaviour): try: received_roles = self._queryenv.list_roles(behaviour=behaviour) except: @@ -188,5 +188,5 @@ def _host_is_replication_master(self, ip, 
behaviour): for role in received_roles: for host in role.hosts: if ip == host.internal_ip: - return host.replication_master + return host.replication_main return False diff --git a/src/scalarizr/handlers/mongodb.py b/src/scalarizr/handlers/mongodb.py index 7ea9483..3cb4255 100755 --- a/src/scalarizr/handlers/mongodb.py +++ b/src/scalarizr/handlers/mongodb.py @@ -98,19 +98,19 @@ class MongoDBMessages: = HOST_INIT_RESPONSE = @ivar MongoDB=dict( key_file A key file with at least 6 Base64 characters - volume_config Master storage configuration (on master) - snapshot_config Master storage snapshot (both) + volume_config Main storage configuration (on main) + snapshot_config Main storage snapshot (both) ) = HOST_UP = @ivar mysql=dict( - root_password: 'scalr' user password (on master) - repl_password: 'scalr_repl' user password (on master) - stat_password: 'scalr_stat' user password (on master) - log_file: Binary log file (on master) - log_pos: Binary log file position (on master) + root_password: 'scalr' user password (on main) + repl_password: 'scalr_repl' user password (on main) + stat_password: 'scalr_stat' user password (on main) + log_file: Binary log file (on main) + log_pos: Binary log file position (on main) volume_config: Current storage configuration (both) - snapshot_config: Master storage snapshot (on master) + snapshot_config: Main storage snapshot (on main) ) """ @@ -195,15 +195,15 @@ def __init__(self): '%s_data_bundle' % BEHAVIOUR, - # @param host: New master hostname - 'before_%s_change_master' % BEHAVIOUR, + # @param host: New main hostname + 'before_%s_change_main' % BEHAVIOUR, - # @param host: New master hostname - '%s_change_master' % BEHAVIOUR, + # @param host: New main hostname + '%s_change_main' % BEHAVIOUR, - 'before_slave_promote_to_master', + 'before_subordinate_promote_to_main', - 'slave_promote_to_master' + 'subordinate_promote_to_main' ) self.api = mongodb_api.MongoDBAPI() @@ -315,7 +315,7 @@ def on_host_init_response(self, message): def 
on_before_host_up(self, hostup_msg): """ - Check that replication is up in both master and slave cases + Check that replication is up in both main and subordinate cases @type hostup_msg: scalarizr.messaging.Message @param hostup_msg: HostUp message """ @@ -357,11 +357,11 @@ def on_before_host_up(self, hostup_msg): rs_name = RS_NAME_TPL % self.shard_index if first_in_rs: - log.info('Initialize Master') - self._init_master(hostup_msg, rs_name) + log.info('Initialize Main') + self._init_main(hostup_msg, rs_name) else: - log.info('Initialize Slave') - self._init_slave(hostup_msg, rs_name) + log.info('Initialize Subordinate') + self._init_subordinate(hostup_msg, rs_name) possible_self_arbiter = "%s:%s" % (self.hostname, mongo_svc.ARBITER_DEFAULT_PORT) if possible_self_arbiter in self.mongodb.arbiters: @@ -572,13 +572,13 @@ def on_MongoDb_IntCreateBootstrapWatcher(self, message): wait_until(lambda: self.mongodb.primary_host, timeout=180, start_text='Wait for primary node in replica set', logger=self._logger) - is_master = self.mongodb.is_replication_master + is_main = self.mongodb.is_replication_main - if is_master and self.shard_index == shard_idx: + if is_main and self.shard_index == shard_idx: nodename = '%s:%s' % (hostname, mongo_svc.REPLICA_DEFAULT_PORT) if nodename not in self.mongodb.replicas: - self.mongodb.register_slave(hostname, mongo_svc.REPLICA_DEFAULT_PORT) + self.mongodb.register_subordinate(hostname, mongo_svc.REPLICA_DEFAULT_PORT) else: self._logger.warning('Host %s is already in replica set.' 
% nodename) @@ -661,7 +661,7 @@ def on_HostUp(self, message): self._logger.debug('Flushing router configuration') self.mongodb.router_cli.flush_router_cfg() - if self.mongodb.is_replication_master and \ + if self.mongodb.is_replication_main and \ self.shard_index == new_host_shard_idx: r = len(self.mongodb.replicas) a = len(self.mongodb.arbiters) @@ -672,7 +672,7 @@ def on_HostUp(self, message): for arbiter in self.mongodb.arbiters: arb_host, arb_port = arbiter.split(':') arb_port = int(arb_port) - self.mongodb.unregister_slave(arb_host, arb_port) + self.mongodb.unregister_subordinate(arb_host, arb_port) self.mongodb.stop_arbiter() else: if len(self.mongodb.replicas) % 2 != 0: @@ -745,17 +745,17 @@ def on_HostDown(self, message): self._logger.debug("Got %s from node %s but ip address doesn't match.", message.name, down_node_host) return - is_master = self.mongodb.is_replication_master + is_main = self.mongodb.is_replication_main - if not is_master and len(self.mongodb.replicas) == 2: + if not is_main and len(self.mongodb.replicas) == 2: local_ip = self._platform.get_private_ip() possible_self_arbiter = "%s:%s" % (local_ip, mongo_svc.ARBITER_DEFAULT_PORT) try: if possible_self_arbiter in self.mongodb.arbiters: """ Start arbiter if it's not running """ self.mongodb.arbiter.start() - """ Wait until we become master """ - wait_until(lambda: self.mongodb.is_replication_master, timeout=180) + """ Wait until we become main """ + wait_until(lambda: self.mongodb.is_replication_main, timeout=180) else: raise Exception('Arbiter not found') except: @@ -765,14 +765,14 @@ def on_HostDown(self, message): rs_cfg['members'] = [m for m in rs_cfg['members'] if m['host'] == nodename] self.mongodb.cli.rs_reconfig(rs_cfg, force=True) try: - wait_until(lambda: self.mongodb.is_replication_master, timeout=30) + wait_until(lambda: self.mongodb.is_replication_main, timeout=30) except: """ Looks like mongo stuck in secondary state (syncingTo dead node) Restart should fix this """ if "seconds 
reached" in str(sys.exc_info()[1]): self.mongodb.mongod.restart(reason="Reconfiguring replica set") - wait_until(lambda: self.mongodb.is_replication_master, timeout=30) + wait_until(lambda: self.mongodb.is_replication_main, timeout=30) else: raise else: @@ -780,15 +780,15 @@ def on_HostDown(self, message): start_text='Wait for primary node in replica set', logger=self._logger) - if self.mongodb.is_replication_master: + if self.mongodb.is_replication_main: """ Remove host from replica set""" - self.mongodb.unregister_slave(down_node_host) + self.mongodb.unregister_subordinate(down_node_host) """ If arbiter was running on the node - unregister it """ possible_arbiter = "%s:%s" % (down_node_host, mongo_svc.ARBITER_DEFAULT_PORT) if possible_arbiter in self.mongodb.arbiters: - self.mongodb.unregister_slave(down_node_host, mongo_svc.ARBITER_DEFAULT_PORT) + self.mongodb.unregister_subordinate(down_node_host, mongo_svc.ARBITER_DEFAULT_PORT) """ Start arbiter if necessary """ if len(self.mongodb.replicas) % 2 == 0: @@ -798,12 +798,12 @@ def on_HostDown(self, message): for arbiter in self.mongodb.arbiters: arb_host, arb_port = arbiter.split(':') arb_port = int(arb_port) - self.mongodb.unregister_slave(arb_host, arb_port) + self.mongodb.unregister_subordinate(arb_host, arb_port) self.mongodb.stop_arbiter() else: """ Get all replicas except down one, - since we don't know if master already removed + since we don't know if main already removed node from replica set """ replicas = [r for r in self.mongodb.replicas if r != down_node_name] @@ -862,7 +862,7 @@ def on_BeforeHostTerminate(self, message): STATE[CLUSTER_STATE_KEY] = MongoDBClusterStates.TERMINATING storage_vol = __mongodb__['volume'] - if self.mongodb.is_replication_master: + if self.mongodb.is_replication_main: self.mongodb.cli.step_down(180, force=True) self.mongodb.stop_arbiter() self.mongodb.stop_config_server() @@ -938,8 +938,8 @@ def on_MongoDb_CreateDataBundle(self, message): def _create_data_bundle(self): - if 
not self.mongodb.is_replication_master: - self._logger.debug('Not a master. Skipping data bundle') + if not self.mongodb.is_replication_main: + self._logger.debug('Not a main. Skipping data bundle') return try: @@ -974,9 +974,9 @@ def _create_data_bundle(self): self.mongodb.router_cli.start_balancer() - def _init_master(self, message, rs_name): + def _init_main(self, message, rs_name): """ - Initialize mongodb master + Initialize mongodb main @type message: scalarizr.messaging.Message @param message: HostUp message """ @@ -1005,14 +1005,14 @@ def _init_master(self, message, rs_name): rs_cfg['version'] += 10 self.mongodb.cli.rs_reconfig(rs_cfg, force=True) - wait_until(lambda: self.mongodb.is_replication_master, timeout=180) + wait_until(lambda: self.mongodb.is_replication_main, timeout=180) self.mongodb.cli.create_or_update_admin_user(mongo_svc.SCALR_USER, self.scalr_password) self.mongodb.mongod.stop("Terminating mongod instance to run it with --auth option") self.mongodb.auth = True self.mongodb.start_shardsvr() self.mongodb.cli.auth(mongo_svc.SCALR_USER, self.scalr_password) - wait_until(lambda: self.mongodb.is_replication_master, sleep=5, logger=self._logger, + wait_until(lambda: self.mongodb.is_replication_main, sleep=5, logger=self._logger, timeout=120, start_text='Wait until node becomes replication primary') # Create snapshot #self.mongodb.cli.sync(lock=True) @@ -1054,9 +1054,9 @@ def plug_storage(self): __mongodb__['volume'] = storage_volume - def _init_slave(self, message, rs_name): + def _init_subordinate(self, message, rs_name): """ - Initialize mongodb slave + Initialize mongodb subordinate @type message: scalarizr.messaging.Message @param message: HostUp message """ @@ -1168,7 +1168,7 @@ def request_and_wait_replication_status(): cdb_result_received = True try: if msg.status == 'ok': - self._logger.info('Received data bundle from master node.') + self._logger.info('Received data bundle from main node.') self.mongodb.mongod.stop() storage_vol.detach() 
@@ -1237,14 +1237,14 @@ def on_MongoDb_IntClusterTerminate(self, message): STATE[CLUSTER_STATE_KEY] = MongoDBClusterStates.TERMINATING if not self.mongodb.mongod.is_running: self.mongodb.start_shardsvr() - is_replication_master = self.mongodb.is_replication_master + is_replication_main = self.mongodb.is_replication_main self.mongodb.mongod.stop() self.mongodb.stop_config_server() msg_body = dict(status='ok', shard_index=self.shard_index, replica_set_index=self.rs_id, - is_master=int(is_replication_master)) + is_main=int(is_replication_main)) except: msg_body = dict(status='error', last_error=str(sys.exc_info()[1]), @@ -1656,7 +1656,7 @@ def run(self): if 'last_error' in self.full_status[shard_id][rs_id]: del self.full_status[shard_id][rs_id]['last_error'] self.full_status[shard_id][rs_id]['status'] = TerminationState.TERMINATED - self.full_status[shard_id][rs_id]['is_master'] = int(msg.is_master) + self.full_status[shard_id][rs_id]['is_main'] = int(msg.is_main) else: self.full_status[shard_id][rs_id]['status'] = TerminationState.FAILED self.full_status[shard_id][rs_id]['last_error'] = msg.last_error diff --git a/src/scalarizr/handlers/mysql.py b/src/scalarizr/handlers/mysql.py index ce02bfa..cf8e80e 100755 --- a/src/scalarizr/handlers/mysql.py +++ b/src/scalarizr/handlers/mysql.py @@ -59,7 +59,7 @@ MYSQLDUMP = which('mysqldump') MYCNF = '/etc/my.cnf' if linux.os.redhat_family else '/etc/mysql/my.cnf' -change_master_timeout = 30 +change_main_timeout = 30 class MysqlInitScript(initdv2.ParametrizedInitScript): @@ -173,7 +173,7 @@ class MysqlServiceConfigurator: OPT_ROOT_PASSWORD = "root_password" OPT_REPL_PASSWORD = "repl_password" OPT_STAT_PASSWORD = "stat_password" -OPT_REPLICATION_MASTER = "replication_master" +OPT_REPLICATION_MASTER = "replication_main" OPT_LOG_FILE = "log_file" OPT_LOG_POS = "log_pos" OPT_VOLUME_CNF = 'volume_config' @@ -183,7 +183,7 @@ class MysqlServiceConfigurator: OPT_MYSQL_PATH = 'mysql_path' OPT_MYSQLDUMP_PATH = 'mysqldump_path' 
OPT_MYCNF_PATH = 'mycnf_path' -OPT_CHANGE_MASTER_TIMEOUT = 'change_master_timeout' +OPT_CHANGE_MASTER_TIMEOUT = 'change_main_timeout' # System users ROOT_USER = "scalr" @@ -264,10 +264,10 @@ def fetchdict(self, query): 1. Cloud support pluggable disks (ex: EBS) +-------+ +------------------+ +--------+ -| Scalr | | Slave1 -> Master | | Slave2 | +| Scalr | | Subordinate1 -> Main | | Subordinate2 | +-------+ +------------------+ +--------+ - Mysql_PromoteToMaster + Mysql_PromoteToMain - root_password - repl_password - stat_password @@ -276,15 +276,15 @@ def fetchdict(self, query): STOP SLAVE vol = Storage(volume_config) - vol.detach() from Master - vol.attach() to Slave1 + vol.detach() from Main + vol.attach() to Subordinate1 start mysql - Mysql_PromoteToMasterResult + Mysql_PromoteToMainResult - volume_config <-------------------------- - Mysql_NewMasterUp + Mysql_NewMainUp - root_password - repl_password - stat_password @@ -295,10 +295,10 @@ def fetchdict(self, query): 2. Cloud has no pluggable disks (ex: Rackspace) +-------+ +------------------+ +--------+ -| Scalr | | Slave1 -> Master | | Slave2 | +| Scalr | | Subordinate1 -> Main | | Subordinate2 | +-------+ +------------------+ +--------+ - Mysql_PromoteToMaster + Mysql_PromoteToMain - root_password - repl_password - stat_password @@ -309,13 +309,13 @@ def fetchdict(self, query): start mysql create snapshot - Mysql_PromoteToMasterResult + Mysql_PromoteToMainResult - snapshot_config - log_file - log_pos <---------------------------- - Mysql_NewMasterUp + Mysql_NewMainUp - local_ip - repl_password - snapshot_config @@ -365,25 +365,25 @@ class MysqlMessages: @ivar farm_role_id """ - PROMOTE_TO_MASTER = "Mysql_PromoteToMaster" + PROMOTE_TO_MASTER = "Mysql_PromoteToMain" """ @ivar root_password: 'scalr' user password @ivar repl_password: 'scalr_repl' user password @ivar stat_password: 'scalr_stat' user password - @ivar volume_config?: Master storage configuration + @ivar volume_config?: Main storage configuration 
""" - PROMOTE_TO_MASTER_RESULT = "Mysql_PromoteToMasterResult" + PROMOTE_TO_MASTER_RESULT = "Mysql_PromoteToMainResult" """ @ivar status: ok|error @ivar last_error: Last error message in case of status = 'error' - @ivar volume_config: Master storage configuration + @ivar volume_config: Main storage configuration @ivar snapshot_config? @ivar log_file? @ivar log_pos? """ - NEW_MASTER_UP = "Mysql_NewMasterUp" + NEW_MASTER_UP = "Mysql_NewMainUp" """ @ivar behaviour @ivar local_ip @@ -404,25 +404,25 @@ class MysqlMessages: = HOST_INIT_RESPONSE = @ivar mysql=dict( - replication_master: 1|0 - root_password: 'scalr' user password (on slave) - repl_password: 'scalr_repl' user password (on slave) - stat_password: 'scalr_stat' user password (on slave) - log_file: Binary log file (on slave) - log_pos: Binary log file position (on slave) - volume_config Master storage configuration (on master) - snapshot_config Master storage snapshot (both) + replication_main: 1|0 + root_password: 'scalr' user password (on subordinate) + repl_password: 'scalr_repl' user password (on subordinate) + stat_password: 'scalr_stat' user password (on subordinate) + log_file: Binary log file (on subordinate) + log_pos: Binary log file position (on subordinate) + volume_config Main storage configuration (on main) + snapshot_config Main storage snapshot (both) ) = HOST_UP = @ivar mysql=dict( - root_password: 'scalr' user password (on master) - repl_password: 'scalr_repl' user password (on master) - stat_password: 'scalr_stat' user password (on master) - log_file: Binary log file (on master) - log_pos: Binary log file position (on master) + root_password: 'scalr' user password (on main) + repl_password: 'scalr_repl' user password (on main) + stat_password: 'scalr_stat' user password (on main) + log_file: Binary log file (on main) + log_pos: Binary log file position (on main) volume_config: Current storage configuration (both) - snapshot_config: Master storage snapshot (on master) + snapshot_config: Main 
storage snapshot (on main) ) """ @@ -633,17 +633,17 @@ def __init__(self): 'mysql_data_bundle', - # @param host: New master hostname - 'before_mysql_change_master', + # @param host: New main hostname + 'before_mysql_change_main', - # @param host: New master hostname + # @param host: New main hostname # @param log_file: log file to start from # @param log_pos: log pos to start from - 'mysql_change_master' + 'mysql_change_main' - 'before_slave_promote_to_master', + 'before_subordinate_promote_to_main', - 'slave_promote_to_master' + 'subordinate_promote_to_main' ) self.on_reload() @@ -712,10 +712,10 @@ def on_reload(self): ini = self._cnf.rawini self._role_name = ini.get(config.SECT_GENERAL, config.OPT_ROLE_NAME) try: - self._change_master_timeout = globals()['change_master_timeout'] = int( + self._change_main_timeout = globals()['change_main_timeout'] = int( ini.get(CNF_SECTION, OPT_CHANGE_MASTER_TIMEOUT) or '30') except ConfigParser.Error: - self._change_master_timeout = globals()['change_master_timeout'] = 30 + self._change_main_timeout = globals()['change_main_timeout'] = 30 self._storage_path = STORAGE_PATH self._data_dir = os.path.join(self._storage_path, STORAGE_DATA_DIR) @@ -764,9 +764,9 @@ def on_Mysql_ConvertToDbmsr(self, message): def on_Mysql_CreatePmaUser(self, message): try: - # Operation allowed only on Master server + # Operation allowed only on Main server if not int(self._cnf.rawini.get(CNF_SECTION, OPT_REPLICATION_MASTER)): - raise HandlerError('Cannot add pma user on slave. It should be a Master server') + raise HandlerError('Cannot add pma user on subordinate. 
It should be a Main server') root_password, = self._get_ini_options(OPT_ROOT_PASSWORD) pma_server_ip = message.pma_server_ip @@ -930,7 +930,7 @@ def _innodb_recovery(self, storage_path=None): '--skip-networking', '--skip-grant', '--bootstrap', - '--skip-slave-start') + '--skip-subordinate-start') ''' if ndb_support: mysqld_safe_cmd += ('--skip-ndbcluster',) @@ -950,32 +950,32 @@ def _data_bundle_description(self): @_reload_mycnf - def on_Mysql_PromoteToMaster(self, message): + def on_Mysql_PromoteToMain(self, message): """ - Promote slave to master + Promote subordinate to main @type message: scalarizr.messaging.Message - @param message: Mysql_PromoteToMaster + @param message: Mysql_PromoteToMain """ old_conf = None new_storage_vol = None if not int(self._cnf.rawini.get(CNF_SECTION, OPT_REPLICATION_MASTER)): - bus.fire('before_slave_promote_to_master') + bus.fire('before_subordinate_promote_to_main') if bus.scalr_version >= (2, 2): - master_storage_conf = message.body.get('volume_config') + main_storage_conf = message.body.get('volume_config') else: if 'volume_id' in message.body: - master_storage_conf = dict(type='ebs', id=message.body['volume_id']) + main_storage_conf = dict(type='ebs', id=message.body['volume_id']) else: - master_storage_conf = None + main_storage_conf = None tx_complete = False try: # Stop mysql - if master_storage_conf and master_storage_conf['type'] != 'eph': + if main_storage_conf and main_storage_conf['type'] != 'eph': if self._init_script.running: mysql = spawn_mysql_cli(ROOT_USER, message.root_password) timeout = 180 @@ -984,18 +984,18 @@ def on_Mysql_PromoteToMaster(self, message): mysql.expect("mysql>", timeout=timeout) except pexpect.TIMEOUT: raise HandlerError("Timeout (%d seconds) reached " - "while waiting for slave stop" % (timeout,)) + "while waiting for subordinate stop" % (timeout,)) finally: mysql.close() - self._stop_service('Swapping storages to promote slave to master') + self._stop_service('Swapping storages to promote 
subordinate to main') - # Unplug slave storage and plug master one - #self._unplug_storage(slave_vol_id, self._storage_path) + # Unplug subordinate storage and plug main one + #self._unplug_storage(subordinate_vol_id, self._storage_path) old_conf = self.storage_vol.detach(force=True) # ?????? - #master_vol = self._take_master_volume(master_vol_id) - #self._plug_storage(master_vol.id, self._storage_path) - new_storage_vol = self._plug_storage(self._storage_path, master_storage_conf) - # Continue if master storage is a valid MySQL storage + #main_vol = self._take_main_volume(main_vol_id) + #self._plug_storage(main_vol.id, self._storage_path) + new_storage_vol = self._plug_storage(self._storage_path, main_storage_conf) + # Continue if main storage is a valid MySQL storage if self._storage_valid(): # Patch configuration files self._move_mysql_dir('mysqld/log_bin', self._binlog_base) @@ -1020,7 +1020,7 @@ def on_Mysql_PromoteToMaster(self, message): else: raise HandlerError("%s is not a valid MySQL storage" % self._storage_path) - elif not master_storage_conf or master_storage_conf['type'] == 'eph': + elif not main_storage_conf or main_storage_conf['type'] == 'eph': self._start_service() mysql = spawn_mysql_cli(ROOT_USER, message.root_password) timeout = 180 @@ -1030,10 +1030,10 @@ def on_Mysql_PromoteToMaster(self, message): mysql.sendline("RESET MASTER;") mysql.expect("mysql>", 20) coreutils.remove(os.path.join(self._data_dir, 'relay-log.info')) - coreutils.remove(os.path.join(self._data_dir, 'master.info')) + coreutils.remove(os.path.join(self._data_dir, 'main.info')) except pexpect.TIMEOUT: msg = "Timeout (%d seconds) reached " \ - "while waiting for slave stop and master reset." % (timeout,) + "while waiting for subordinate stop and main reset." 
% (timeout,) raise HandlerError(msg) finally: mysql.close() @@ -1059,7 +1059,7 @@ def on_Mysql_PromoteToMaster(self, message): self.send_message(MysqlMessages.PROMOTE_TO_MASTER_RESULT, msg_data) tx_complete = True - bus.fire('slave_promote_to_master') + bus.fire('subordinate_promote_to_main') except (Exception, BaseException), e: LOG.exception(e) @@ -1067,7 +1067,7 @@ def on_Mysql_PromoteToMaster(self, message): if new_storage_vol: new_storage_vol.detach() - # Get back slave storage + # Get back subordinate storage if old_conf: self._plug_storage(self._storage_path, old_conf) @@ -1079,35 +1079,35 @@ def on_Mysql_PromoteToMaster(self, message): # Start MySQL self._start_service() - if tx_complete and master_storage_conf and master_storage_conf['type'] != 'eph': - # Delete slave EBS + if tx_complete and main_storage_conf and main_storage_conf['type'] != 'eph': + # Delete subordinate EBS self.storage_vol.destroy(remove_disks=True) self.storage_vol = new_storage_vol Storage.backup_config(self.storage_vol.config(), self._volume_config_path) else: - LOG.warning('Cannot promote to master. Already master') + LOG.warning('Cannot promote to main. Already main') @_reload_mycnf - def on_Mysql_NewMasterUp(self, message): + def on_Mysql_NewMainUp(self, message): """ - Switch replication to a new master server + Switch replication to a new main server @type message: scalarizr.messaging.Message - @param message: Mysql_NewMasterUp + @param message: Mysql_NewMainUp """ - is_repl_master, = self._get_ini_options(OPT_REPLICATION_MASTER) + is_repl_main, = self._get_ini_options(OPT_REPLICATION_MASTER) - if int(is_repl_master): - LOG.debug('Skip NewMasterUp. My replication role is master') + if int(is_repl_main): + LOG.debug('Skip NewMainUp. 
My replication role is main') return mysql = message.body host = message.local_ip or message.remote_ip - LOG.info("Switching replication to a new MySQL master %s", host) - bus.fire('before_mysql_change_master', host=host) + LOG.info("Switching replication to a new MySQL main %s", host) + bus.fire('before_mysql_change_main', host=host) if 'snapshot_config' in mysql and mysql['snapshot_config']['type'] != 'eph': - LOG.info('Reinitializing Slave from the new snapshot %s (log_file: %s log_pos: %s)', + LOG.info('Reinitializing Subordinate from the new snapshot %s (log_file: %s log_pos: %s)', message.snapshot_config['id'], message.log_file, message.log_pos) - self._stop_service('Swapping storages to reinitialize slave') + self._stop_service('Swapping storages to reinitialize subordinate') LOG.debug('Destroing old storage') self.storage_vol.destroy() @@ -1129,10 +1129,10 @@ def on_Mysql_NewMasterUp(self, message): my_cli = spawn_mysql_cli(ROOT_USER, message.root_password) if not 'snapshot_config' in mysql or mysql['snapshot_config']['type'] == 'eph': - LOG.debug("Stopping slave i/o thread") + LOG.debug("Stopping subordinate i/o thread") my_cli.sendline("STOP SLAVE IO_THREAD;") my_cli.expect("mysql>") - LOG.debug("Slave i/o thread stopped") + LOG.debug("Subordinate i/o thread stopped") LOG.debug("Retrieving current log_file and log_pos") my_cli.sendline("SHOW SLAVE STATUS\\G"); @@ -1140,24 +1140,24 @@ def on_Mysql_NewMasterUp(self, message): log_file = log_pos = None for line in my_cli.before.split("\n"): pair = map(str.strip, line.split(": ", 1)) - if pair[0] == "Master_Log_File": + if pair[0] == "Main_Log_File": log_file = pair[1] - elif pair[0] == "Read_Master_Log_Pos": + elif pair[0] == "Read_Main_Log_Pos": log_pos = pair[1] LOG.debug("Retrieved log_file=%s, log_pos=%s", log_file, log_pos) - self._change_master( + self._change_main( host=host, user=REPL_USER, password=message.repl_password, log_file=log_file, log_pos=log_pos, - timeout=self._change_master_timeout, + 
timeout=self._change_main_timeout, my_cli=my_cli ) LOG.debug("Replication switched") - bus.fire('mysql_change_master', host=host, log_file=log_file, log_pos=log_pos) + bus.fire('mysql_change_main', host=host, log_file=log_file, log_pos=log_pos) def on_before_reboot_start(self, *args, **kwargs): @@ -1185,7 +1185,7 @@ def on_host_init_response(self, message): mysql_data = message.mysql.copy() # New JSON format pass non-string types - mysql_data['replication_master'] = str(mysql_data['replication_master']) + mysql_data['replication_main'] = str(mysql_data['replication_main']) for key, file in ((OPT_VOLUME_CNF, self._volume_config_path), (OPT_SNAPSHOT_CNF, self._snapshot_config_path)): @@ -1234,13 +1234,13 @@ def on_before_host_up(self, message): except: pass - repl = 'master' if int(self._cnf.rawini.get(CNF_SECTION, OPT_REPLICATION_MASTER)) else 'slave' - if repl == 'master': + repl = 'main' if int(self._cnf.rawini.get(CNF_SECTION, OPT_REPLICATION_MASTER)) else 'subordinate' + if repl == 'main': bus.fire('before_mysql_configure', replication=repl) - self._init_master(message) + self._init_main(message) else: bus.fire('before_mysql_configure', replication=repl) - self._init_slave(message) + self._init_subordinate(message) bus.fire('service_configured', service_name=SERVICE_NAME, replication=repl) @@ -1254,14 +1254,14 @@ def _change_selinux_ctx(self): system2((chcon_exec, '-R', '-u', 'system_u', '-r', 'object_r', '-t', 'mysqld_db_t', os.path.dirname(STORAGE_PATH)), raise_exc=False) - def _init_master(self, message): + def _init_main(self, message): """ - Initialize MySQL master + Initialize MySQL main @type message: scalarizr.messaging.Message @param message: HostUp message """ log = bus.init_op.logger - log.info("Initializing MySQL master") + log.info("Initializing MySQL main") log.info('Create storage') # Plug storage @@ -1275,7 +1275,7 @@ def _init_master(self, message): Storage.backup_config(self.storage_vol.config(), self._volume_config_path) # Stop MySQL server - 
#self._stop_service('Required by Master initialization process') + #self._stop_service('Required by Main initialization process') self._flush_logs() msg_data = None @@ -1302,9 +1302,9 @@ def _init_master(self, message): # Init replication log.info('Patch my.cnf configuration file') - self._replication_init(master=True) + self._replication_init(main=True) - # If It's 1st init of mysql master storage + # If It's 1st init of mysql main storage if not storage_valid: self._copy_debian_cnf() @@ -1382,21 +1382,21 @@ def _compat_storage_data(self, vol=None, snap=None): ret['snapshot_id'] = snap.config()['id'] return ret - def _init_slave(self, message): + def _init_subordinate(self, message): """ - Initialize MySQL slave + Initialize MySQL subordinate @type message: scalarizr.messaging.Message @param message: HostUp message """ log = bus.init_op.logger - LOG.info("Initializing MySQL slave") + LOG.info("Initializing MySQL subordinate") log.info('Create storage') # Read required configuration options root_pass, repl_pass, log_file, log_pos = self._get_ini_options( OPT_ROOT_PASSWORD, OPT_REPL_PASSWORD, OPT_LOG_FILE, OPT_LOG_POS) - LOG.debug("Initialize slave storage") + LOG.debug("Initialize subordinate storage") self.storage_vol = self._plug_storage(self._storage_path, dict(snapshot=Storage.restore_config(self._snapshot_config_path))) Storage.backup_config(self.storage_vol.config(), self._volume_config_path) @@ -1405,7 +1405,7 @@ def _init_slave(self, message): try: log.info('Patch my.cnf configuration file') # Stop MySQL - #self._stop_service('Required by Slave initialization process') + #self._stop_service('Required by Subordinate initialization process') self._flush_logs() # Change configuration files @@ -1420,32 +1420,32 @@ def _init_slave(self, message): self._move_mysql_dir('mysqld/datadir', self._data_dir) self._move_mysql_dir('mysqld/log_bin', self._binlog_base) self._change_selinux_ctx() - self._replication_init(master=False) + self._replication_init(main=False) 
self._copy_debian_cnf_back() log.info('InnoDB recovery') self._innodb_recovery() self._start_service() - # Change replication master - log.info('Change replication Master') - master_host = None - LOG.info("Requesting master server") - while not master_host: + # Change replication main + log.info('Change replication Main') + main_host = None + LOG.info("Requesting main server") + while not main_host: try: - master_host = list(host + main_host = list(host for host in self._queryenv.list_roles(behaviour=BEHAVIOUR)[0].hosts - if host.replication_master)[0] + if host.replication_main)[0] except IndexError: - LOG.debug("QueryEnv respond with no mysql master. " + + LOG.debug("QueryEnv respond with no mysql main. " + "Waiting %d seconds before the next attempt", 5) time.sleep(5) - LOG.debug("Master server obtained (local_ip: %s, public_ip: %s)", - master_host.internal_ip, master_host.external_ip) + LOG.debug("Main server obtained (local_ip: %s, public_ip: %s)", + main_host.internal_ip, main_host.external_ip) - host = master_host.internal_ip or master_host.external_ip - self._change_master( + host = main_host.internal_ip or main_host.external_ip + self._change_main( host=host, user=REPL_USER, password=repl_pass, @@ -1453,7 +1453,7 @@ def _init_slave(self, message): log_pos=log_pos, mysql_user=ROOT_USER, mysql_password=root_pass, - timeout=self._change_master_timeout + timeout=self._change_main_timeout ) # Update HostUp message @@ -1573,13 +1573,13 @@ def _create_snapshot(self, root_user, root_password, dry_run=False, tags=None): log_file = log_row.group(1) log_pos = log_row.group(2) else: - raise HandlerError('SHOW MASTER STATUS returns empty set. Master is not started?') + raise HandlerError('SHOW MASTER STATUS returns empty set. Main is not started?') ''' try: status = mysql.client.fetchall('SHOW MASTER STATUS')[0] except IndexError: - raise HandlerError('SHOW MASTER STATUS returns empty set. 
Master is not started?') + raise HandlerError('SHOW MASTER STATUS returns empty set. Main is not started?') else: log_file, log_pos = status['File'], status['Position'] ''' @@ -1587,12 +1587,12 @@ def _create_snapshot(self, root_user, root_password, dry_run=False, tags=None): my_cli.sendline('SHOW SLAVE STATUS \G') my_cli.expect('mysql>') lines = my_cli.before - log_row = re.search(re.compile('Relay_Master_Log_File:\s*(.*?)$.*?Exec_Master_Log_Pos:\s*(.*?)$', re.M | re.S), lines) + log_row = re.search(re.compile('Relay_Main_Log_File:\s*(.*?)$.*?Exec_Main_Log_Pos:\s*(.*?)$', re.M | re.S), lines) if log_row: log_file = log_row.group(1).strip() log_pos = log_row.group(2).strip() else: - raise HandlerError('SHOW SLAVE STATUS returns empty set. Slave is not started?') + raise HandlerError('SHOW SLAVE STATUS returns empty set. Subordinate is not started?') # Creating storage snapshot snap = None if dry_run else self._create_storage_snapshot(tags) @@ -1645,7 +1645,7 @@ def _add_mysql_users(self, root_user, repl_user, stat_user, root_pass=None, repl stat_password = stat_pass if stat_pass else cryptotool.pwgen(20) self._add_mysql_user(my_cli, root_user, root_password, '%') self._add_mysql_user(my_cli, root_user, root_password, 'localhost') - self._add_mysql_user(my_cli, repl_user, repl_password, '%', ('Repl_slave_priv',)) + self._add_mysql_user(my_cli, repl_user, repl_password, '%', ('Repl_subordinate_priv',)) self._add_mysql_user(my_cli, stat_user, stat_password, '%', ('Repl_client_priv',)) if should_term_mysqld: @@ -1696,10 +1696,10 @@ def _update_config(self, data): @_reload_mycnf - def _replication_init(self, master=True): + def _replication_init(self, main=True): # Create replication config self._mysql_config.set('mysqld/expire_logs_days', 10, force=True) - server_id = 1 if master else int(random.random() * 100000)+1 + server_id = 1 if main else int(random.random() * 100000)+1 self._mysql_config.remove('mysqld/server-id') self._mysql_config.add('mysqld/server-id', 
server_id) # Patch networking @@ -1711,13 +1711,13 @@ def _replication_init(self, master=True): self.write_config() - def _change_master(self, host, user, password, log_file, log_pos, + def _change_main(self, host, user, password, log_file, log_pos, my_cli=None, mysql_user=None, mysql_password=None, connect_retry=15, timeout=None): my_cli = my_cli or spawn_mysql_cli(mysql_user, mysql_password) - LOG.info("Changing replication Master to server %s (log_file: %s, log_pos: %s)", host, log_file, log_pos) + LOG.info("Changing replication Main to server %s (log_file: %s, log_pos: %s)", host, log_file, log_pos) - # Changing replication master + # Changing replication main my_cli.sendline('STOP SLAVE;') my_cli.expect('mysql>') my_cli.sendline('CHANGE MASTER TO MASTER_HOST="%(host)s", \ @@ -1728,14 +1728,14 @@ def _change_master(self, host, user, password, log_file, log_pos, MASTER_CONNECT_RETRY=%(connect_retry)s;' % vars()) my_cli.expect('mysql>') - # Starting slave + # Starting subordinate my_cli.sendline('START SLAVE;') my_cli.expect('mysql>') status = my_cli.before if re.search(re.compile('ERROR', re.MULTILINE), status): - raise HandlerError('Cannot start mysql slave: %s' % status) + raise HandlerError('Cannot start mysql subordinate: %s' % status) - def slave_status(): + def subordinate_status(): my_cli.sendline('SHOW SLAVE STATUS\G') my_cli.expect('mysql>') out = my_cli.before @@ -1746,9 +1746,9 @@ def slave_status(): time_until = time.time() + timeout status = None while time.time() <= time_until: - status = slave_status() - if status['Slave_IO_Running'] == 'Yes' and \ - status['Slave_SQL_Running'] == 'Yes': + status = subordinate_status() + if status['Subordinate_IO_Running'] == 'Yes' and \ + status['Subordinate_SQL_Running'] == 'Yes': break time.sleep(5) else: @@ -1757,7 +1757,7 @@ def slave_status(): logfile = firstmatched(lambda p: os.path.exists(p), ('/var/log/mysqld.log', '/var/log/mysql.log')) if logfile: - gotcha = '[ERROR] Slave I/O thread: ' + gotcha = 
'[ERROR] Subordinate I/O thread: ' size = os.path.getsize(logfile) fp = open(logfile, 'r') try: @@ -1769,21 +1769,21 @@ def slave_status(): finally: fp.close() - msg = "Cannot change replication Master server to '%s'. " \ - "Slave_IO_Running: %s, Slave_SQL_Running: %s, " \ + msg = "Cannot change replication Main server to '%s'. " \ + "Subordinate_IO_Running: %s, Subordinate_SQL_Running: %s, " \ "Last_Errno: %s, Last_Error: '%s'" % ( - host, status['Slave_IO_Running'], status['Slave_SQL_Running'], + host, status['Subordinate_IO_Running'], status['Subordinate_SQL_Running'], status['Last_Errno'], status['Last_Error']) raise HandlerError(msg) else: - raise HandlerError('Cannot change replication master to %s' % (host)) + raise HandlerError('Cannot change replication main to %s' % (host)) finally: try: my_cli.close() except: os.kill(my_cli.pid, signal.SIGKILL) - LOG.debug('Replication master is changed to host %s', host) + LOG.debug('Replication main is changed to host %s', host) def _ping_mysql(self): @@ -1846,7 +1846,7 @@ def _flush_logs(self): if not os.path.exists(self._data_dir): return - info_files = ['relay-log.info', 'master.info'] + info_files = ['relay-log.info', 'main.info'] files = os.listdir(self._data_dir) for file in files: diff --git a/src/scalarizr/handlers/mysql2.py b/src/scalarizr/handlers/mysql2.py index baa4787..8d8e50f 100755 --- a/src/scalarizr/handlers/mysql2.py +++ b/src/scalarizr/handlers/mysql2.py @@ -46,7 +46,7 @@ PRIVILEGES = { - __mysql__['repl_user']: ('Repl_slave_priv', ), + __mysql__['repl_user']: ('Repl_subordinate_priv', ), __mysql__['stat_user']: ('Repl_client_priv', ) } @@ -248,14 +248,14 @@ def __init__(self): bus.define_events( 'before_mysql_data_bundle', 'mysql_data_bundle', - # @param host: New master hostname - 'before_mysql_change_master', - # @param host: New master hostname + # @param host: New main hostname + 'before_mysql_change_main', + # @param host: New main hostname # @param log_file: log file to start from # @param 
log_pos: log pos to start from - 'mysql_change_master' - 'before_slave_promote_to_master', - 'slave_promote_to_master' + 'mysql_change_main' + 'before_subordinate_promote_to_main', + 'subordinate_promote_to_main' ) self._mysql_api = mysql_api.MySQLAPI() @@ -298,7 +298,7 @@ def on_init(self): vol = storage2.volume(__mysql__['volume']) vol.ensure(mount=True) __mysql__['volume'] = vol - if int(__mysql__['replication_master']): + if int(__mysql__['replication_main']): LOG.debug("Checking Scalr's %s system users presence", __mysql__['behavior']) creds = self.get_user_creds() @@ -342,9 +342,9 @@ def on_host_init_response(self, message): # Compatibility transformation # - volume_config -> volume - # - master n'th start, type=ebs - del snapshot_config + # - main n'th start, type=ebs - del snapshot_config # - snapshot_config + log_file + log_pos -> restore - # - create backup on master 1'st start + # - create backup on main 1'st start md['compat_prior_backup_restore'] = True if md.get('volume_config'): @@ -367,7 +367,7 @@ def on_host_init_response(self, message): volume=md['volume'], log_file=md.pop('log_file'), log_pos=md.pop('log_pos')) - elif int(md['replication_master']) and \ + elif int(md['replication_main']) and \ not md['volume'].device: md['backup'] = backup.backup( type='snap_mysql', @@ -406,12 +406,12 @@ def on_before_host_up(self, message): if 'Amazon' == linux.os['name']: self.mysql.my_cnf.pid_file = os.path.join(__mysql__['data_dir'], 'mysqld.pid') - repl = 'master' if int(__mysql__['replication_master']) else 'slave' + repl = 'main' if int(__mysql__['replication_main']) else 'subordinate' bus.fire('before_mysql_configure', replication=repl) - if repl == 'master': - self._init_master(message) + if repl == 'main': + self._init_main(message) else: - self._init_slave(message) + self._init_subordinate(message) # Force to resave volume settings __mysql__['volume'] = storage2.volume(__mysql__['volume']) bus.fire('service_configured', 
service_name=__mysql__['behavior'], @@ -426,7 +426,7 @@ def on_BeforeHostTerminate(self, message): LOG.info('Detaching MySQL storage') vol = storage2.volume(__mysql__['volume']) vol.detach() - if not int(__mysql__['replication_master']): + if not int(__mysql__['replication_main']): LOG.info('Destroying volume %s', vol.id) vol.destroy(remove_disks=True) LOG.info('Volume %s has been destroyed.' % vol.id) @@ -440,10 +440,10 @@ def on_Mysql_CreatePmaUser(self, message): assert message.farm_role_id try: - # Operation allowed only on Master server - if not int(__mysql__['replication_master']): - msg = 'Cannot add pma user on slave. ' \ - 'It should be a Master server' + # Operation allowed only on Main server + if not int(__mysql__['replication_main']): + msg = 'Cannot add pma user on subordinate. ' \ + 'It should be a Main server' raise HandlerError(msg) pma_server_ip = message.pma_server_ip @@ -519,19 +519,19 @@ def on_DbMsr_CancelDataBundle(self, message): self._op_api.cancel(self._data_bundle_id) - def on_DbMsr_PromoteToMaster(self, message): + def on_DbMsr_PromoteToMain(self, message): """ - Promote slave to master + Promote subordinate to main """ - LOG.debug("on_DbMsr_PromoteToMaster") + LOG.debug("on_DbMsr_PromoteToMain") mysql2 = message.body[__mysql__['behavior']] - if int(__mysql__['replication_master']): - LOG.warning('Cannot promote to master. Already master') + if int(__mysql__['replication_main']): + LOG.warning('Cannot promote to main. 
Already main') return - LOG.info('Starting Slave -> Master promotion') + LOG.info('Starting Subordinate -> Main promotion') - bus.fire('before_slave_promote_to_master') + bus.fire('before_subordinate_promote_to_main') __mysql__['compat_prior_backup_restore'] = mysql2.get('volume_config') or \ mysql2.get('snapshot_config') or \ @@ -548,11 +548,11 @@ def on_DbMsr_PromoteToMaster(self, message): try: if new_vol and new_vol.type not in ('eph', 'lvm'): if self.mysql.service.running: - self.root_client.stop_slave() + self.root_client.stop_subordinate() - self.mysql.service.stop('Swapping storages to promote slave to master') + self.mysql.service.stop('Swapping storages to promote subordinate to main') - # Unplug slave storage and plug master one + # Unplug subordinate storage and plug main one old_vol = storage2.volume(__mysql__['volume']) try: if old_vol.type == 'raid': @@ -561,11 +561,11 @@ def on_DbMsr_PromoteToMaster(self, message): old_vol.umount() new_vol.mpoint = __mysql__['storage_dir'] new_vol.ensure(mount=True) - # Continue if master storage is a valid MySQL storage + # Continue if main storage is a valid MySQL storage if self._storage_valid(): # Patch configuration files self.mysql.move_mysqldir_to(__mysql__['storage_dir']) - self.mysql._init_replication(master=True) + self.mysql._init_replication(main=True) # Set read_only option #self.mysql.my_cnf.read_only = False self.mysql.my_cnf.set('mysqld/sync_binlog', '1') @@ -575,7 +575,7 @@ def on_DbMsr_PromoteToMaster(self, message): self.mysql.service.start() # Update __mysql__['behavior'] configuration __mysql__.update({ - 'replication_master': 1, + 'replication_main': 1, 'root_password': mysql2['root_password'], 'repl_password': mysql2['repl_password'], 'stat_password': mysql2['stat_password'], @@ -624,12 +624,12 @@ def on_DbMsr_PromoteToMaster(self, message): self.mysql.service.stop() self.mysql.service.start() - self.root_client.stop_slave() - self.root_client.reset_master() + 
self.root_client.stop_subordinate() + self.root_client.reset_main() self.mysql.flush_logs(__mysql__['data_dir']) __mysql__.update({ - 'replication_master': 1, + 'replication_main': 1, 'root_password': mysql2['root_password'], 'repl_password': mysql2['repl_password'], 'stat_password': mysql2['stat_password'], @@ -675,7 +675,7 @@ def on_DbMsr_PromoteToMaster(self, message): self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data) LOG.info('Promotion completed') - bus.fire('slave_promote_to_master') + bus.fire('subordinate_promote_to_main') except (Exception, BaseException), e: LOG.exception(e) @@ -693,7 +693,7 @@ def on_DbMsr_PromoteToMaster(self, message): self.mysql.service.start() - def on_DbMsr_NewMasterUp(self, message): + def on_DbMsr_NewMainUp(self, message): try: assert message.body.has_key("db_type") assert message.body.has_key("local_ip") @@ -702,13 +702,13 @@ def on_DbMsr_NewMasterUp(self, message): mysql2 = message.body[__mysql__['behavior']] - if int(__mysql__['replication_master']): - LOG.debug('Skip NewMasterUp. My replication role is master') + if int(__mysql__['replication_main']): + LOG.debug('Skip NewMainUp. 
My replication role is main') return host = message.local_ip or message.remote_ip - LOG.info("Switching replication to a new MySQL master %s", host) - bus.fire('before_mysql_change_master', host=host) + LOG.info("Switching replication to a new MySQL main %s", host) + bus.fire('before_mysql_change_main', host=host) LOG.debug("__mysql__['volume']: %s", __mysql__['volume']) @@ -726,14 +726,14 @@ def on_DbMsr_NewMasterUp(self, message): # XXX: ugly old_vol = None if __mysql__['volume'].type == 'eph': - self.mysql.service.stop('Swapping storages to reinitialize slave') + self.mysql.service.stop('Swapping storages to reinitialize subordinate') - LOG.info('Reinitializing Slave from the new snapshot %s (log_file: %s log_pos: %s)', + LOG.info('Reinitializing Subordinate from the new snapshot %s (log_file: %s log_pos: %s)', restore.snapshot['id'], restore.log_file, restore.log_pos) new_vol = restore.run() else: if __node__['platform'].name == 'idcf': - self.mysql.service.stop('Detaching old Slave volume') + self.mysql.service.stop('Detaching old Subordinate volume') old_vol = dict(__mysql__['volume']) old_vol = storage2.volume(old_vol) old_vol.umount() @@ -746,21 +746,21 @@ def on_DbMsr_NewMasterUp(self, message): self.mysql.service.start() if __node__['platform'].name == 'idcf' and old_vol: - LOG.info('Destroying old Slave volume') + LOG.info('Destroying old Subordinate volume') old_vol.destroy(remove_disks=True) else: - LOG.debug("Stopping slave i/o thread") - self.root_client.stop_slave_io_thread() - LOG.debug("Slave i/o thread stopped") + LOG.debug("Stopping subordinate i/o thread") + self.root_client.stop_subordinate_io_thread() + LOG.debug("Subordinate i/o thread stopped") LOG.debug("Retrieving current log_file and log_pos") - status = self.root_client.slave_status() - log_file = status['Master_Log_File'] - log_pos = status['Read_Master_Log_Pos'] + status = self.root_client.subordinate_status() + log_file = status['Master_Log_File'] + log_pos = 
status['Read_Master_Log_Pos'] LOG.debug("Retrieved log_file=%s, log_pos=%s", log_file, log_pos) - self._change_master( + self._change_main( host=host, user=__mysql__['repl_user'], password=mysql2['repl_password'], @@ -770,7 +770,7 @@ def on_DbMsr_NewMasterUp(self, message): ) LOG.debug("Replication switched") - bus.fire('mysql_change_master', host=host, log_file=log_file, log_pos=log_pos) + bus.fire('mysql_change_main', host=host, log_file=log_file, log_pos=log_pos) msg_data = dict( db_type = __mysql__['behavior'], @@ -856,7 +856,7 @@ def _fix_percona_debian_cnf(self): debian_cnf.write(__mysql__['debian.cnf']) - def _change_my_cnf(self, slave=False): + def _change_my_cnf(self, subordinate=False): # Patch configuration options = { 'bind-address': '0.0.0.0', @@ -866,7 +866,7 @@ def _change_my_cnf(self, slave=False): 'sync_binlog': '1', 'expire_logs_days': '10' } - if slave: + if subordinate: options['read_only'] = True if mysql2_svc.innodb_enabled(): options['innodb_flush_log_at_trx_commit'] = '1' @@ -886,13 +886,13 @@ def _change_my_cnf(self, slave=False): self.mysql.my_cnf.set('mysqld/' + key, value) - def _init_master(self, message): + def _init_main(self, message): """ - Initialize MySQL master + Initialize MySQL main @type message: scalarizr.messaging.Message @param message: HostUp message """ - LOG.info("Initializing MySQL master") + LOG.info("Initializing MySQL main") log = bus.init_op.logger log.info('Create storage') @@ -972,14 +972,14 @@ def _init_master(self, message): log.info('Patch my.cnf configuration file') # Init replication - self.mysql._init_replication(master=True) + self.mysql._init_replication(main=True) if 'restore' in __mysql__ and \ __mysql__['restore'].type == 'xtrabackup': __mysql__['restore'].run() - # If It's 1st init of mysql master storage + # If It's 1st init of mysql main storage if not storage_valid: if os.path.exists(__mysql__['debian.cnf']): log.info("Copying debian.cnf file to mysql storage") @@ -1004,11 +1004,11 @@ def 
_init_master(self, message): # Update HostUp message log.info('Collect HostUp data') md = dict( - replication_master=__mysql__['replication_master'], + replication_main=__mysql__['replication_main'], root_password=__mysql__['root_password'], repl_password=__mysql__['repl_password'], stat_password=__mysql__['stat_password'], - master_password=__mysql__['master_password'] + main_password=__mysql__['main_password'] ) if self._hir_volume_growth: @@ -1041,13 +1041,13 @@ def _init_master(self, message): - def _init_slave(self, message): + def _init_subordinate(self, message): """ - Initialize MySQL slave + Initialize MySQL subordinate @type message: scalarizr.messaging.Message @param message: HostUp message """ - LOG.info("Initializing MySQL slave") + LOG.info("Initializing MySQL subordinate") log = bus.init_op.logger log.info('Create storage') @@ -1058,15 +1058,15 @@ def _init_slave(self, message): __mysql__['volume'].ensure(mount=True, mkfs=True) log.info('Patch my.cnf configuration file') - self.mysql.service.stop('Required by Slave initialization process') + self.mysql.service.stop('Required by Subordinate initialization process') self.mysql.flush_logs(__mysql__['data_dir']) self._fix_percona_debian_cnf() - self._change_my_cnf(slave=True) + self._change_my_cnf(subordinate=True) log.info('Move data directory to storage') self.mysql.move_mysqldir_to(__mysql__['storage_dir']) self._change_selinux_ctx() - self.mysql._init_replication(master=False) + self.mysql._init_replication(main=False) self._copy_debian_cnf_back() if 'restore' in __mysql__ and \ @@ -1074,7 +1074,7 @@ def _init_slave(self, message): __mysql__['restore'].run() # MySQL 5.6 stores UUID into data_dir/auto.cnf, which leads to - # 'Fatal error: The slave I/O thread stops because master and slave have equal MySQL server UUIDs' + # 'Fatal error: The subordinate I/O thread stops because main and subordinate have equal MySQL server UUIDs' coreutils.remove(os.path.join(__mysql__['data_dir'], 'auto.cnf')) 
log.info('InnoDB recovery') @@ -1082,13 +1082,13 @@ def _init_slave(self, message): and __mysql__['restore'].type != 'xtrabackup': self._innodb_recovery() - log.info('Change replication Master') - # Change replication master - LOG.info("Requesting master server") - master_host = self.get_master_host() + log.info('Change replication Main') + # Change replication main + LOG.info("Requesting main server") + main_host = self.get_main_host() self.mysql.service.start() - self._change_master( - host=master_host, + self._change_main( + host=main_host, user=__mysql__['repl_user'], password=__mysql__['repl_password'], log_file=__mysql__['restore'].log_file, @@ -1100,20 +1100,20 @@ def _init_slave(self, message): message.db_type = __mysql__['behavior'] - def get_master_host(self): - master_host = None - while not master_host: + def get_main_host(self): + main_host = None + while not main_host: try: - master_host = list(host + main_host = list(host for host in self._queryenv.list_roles(behaviour=__mysql__['behavior'])[0].hosts - if host.replication_master)[0] + if host.replication_main)[0] except IndexError: - LOG.debug("QueryEnv respond with no mysql master. " + + LOG.debug("QueryEnv respond with no mysql main. 
" + "Waiting %d seconds before the next attempt", 5) time.sleep(5) - LOG.debug("Master server obtained (local_ip: %s, public_ip: %s)", - master_host.internal_ip, master_host.external_ip) - return master_host.internal_ip or master_host.external_ip + LOG.debug("Main server obtained (local_ip: %s, public_ip: %s)", + main_host.internal_ip, main_host.external_ip) + return main_host.internal_ip or main_host.external_ip def _copy_debian_cnf_back(self): @@ -1147,7 +1147,7 @@ def _innodb_recovery(self, storage_path=None): '--skip-networking', '--skip-grant', '--bootstrap', - '--skip-slave-start') + '--skip-subordinate-start') system2(mysqld_safe_cmd, stdin="select 1;") @@ -1163,8 +1163,8 @@ def get_user_creds(self): __mysql__['root_user']: 'root_password', __mysql__['repl_user']: 'repl_password', __mysql__['stat_user']: 'stat_password', - # __mysql__['master_user']: 'master_password' - # TODO: disabled scalr_master user until scalr will send/recv it in communication messages + # __mysql__['main_user']: 'main_password' + # TODO: disabled scalr_main user until scalr will send/recv it in communication messages } creds = {} for login, opt_pwd in options.items(): @@ -1183,10 +1183,10 @@ def create_users(self, **creds): local_root = mysql_svc.MySQLUser(root_cli, __mysql__['root_user'], creds[__mysql__['root_user']], host='localhost') - #local_master = mysql_svc.MySQLUser(root_cli, __mysql__['master_user'], - # creds[__mysql__['master_user']], host='localhost', - # privileges=PRIVILEGES.get(__mysql__['master_user'], None)) - #users['master@localhost'] = local_master + #local_main = mysql_svc.MySQLUser(root_cli, __mysql__['main_user'], + # creds[__mysql__['main_user']], host='localhost', + # privileges=PRIVILEGES.get(__mysql__['main_user'], None)) + #users['main@localhost'] = local_main if not self.mysql.service.running: self.mysql.service.start() @@ -1237,29 +1237,29 @@ def _datadir_size(self): return stat.f_bsize * stat.f_blocks / 1024 / 1024 / 1024 + 1 - def _change_master(self, 
host, user, password, log_file, log_pos, timeout=None): + def _change_main(self, host, user, password, log_file, log_pos, timeout=None): - LOG.info("Changing replication Master to server %s (log_file: %s, log_pos: %s)", + LOG.info("Changing replication Main to server %s (log_file: %s, log_pos: %s)", host, log_file, log_pos) - timeout = timeout or int(__mysql__['change_master_timeout']) + timeout = timeout or int(__mysql__['change_main_timeout']) - # Changing replication master - self.root_client.stop_slave() - self.root_client.change_master_to(host, user, password, log_file, log_pos) + # Changing replication main + self.root_client.stop_subordinate() + self.root_client.change_main_to(host, user, password, log_file, log_pos) - # Starting slave - result = self.root_client.start_slave() - LOG.debug('Start slave returned: %s' % result) + # Starting subordinate + result = self.root_client.start_subordinate() + LOG.debug('Start subordinate returned: %s' % result) if result and 'ERROR' in result: - raise HandlerError('Cannot start mysql slave: %s' % result) + raise HandlerError('Cannot start mysql subordinate: %s' % result) time_until = time.time() + timeout status = None while time.time() <= time_until: - status = self.root_client.slave_status() - if status['Slave_IO_Running'] == 'Yes' and \ - status['Slave_SQL_Running'] == 'Yes': + status = self.root_client.subordinate_status() + if status['Slave_IO_Running'] == 'Yes' and \ + status['Slave_SQL_Running'] == 'Yes': break time.sleep(5) else: @@ -1268,7 +1268,7 @@ def _change_master(self, host, user, password, log_file, log_pos, timeout=None): logfile = firstmatched(lambda p: os.path.exists(p), ('/var/log/mysqld.log', '/var/log/mysql.log')) if logfile: - gotcha = '[ERROR] Slave I/O thread: ' + gotcha = '[ERROR] Slave I/O thread: ' size = os.path.getsize(logfile) fp = open(logfile, 'r') try: @@ -1280,15 +1280,15 @@ def _change_master(self, host, user, password, log_file, log_pos, timeout=None): finally: 
fp.close() - msg = "Cannot change replication Master server to '%s'. " \ - "Slave_IO_Running: %s, Slave_SQL_Running: %s, " \ + msg = "Cannot change replication Main server to '%s'. " \ + "Slave_IO_Running: %s, Slave_SQL_Running: %s, " \ "Last_Errno: %s, Last_Error: '%s'" % ( - host, status['Slave_IO_Running'], status['Slave_SQL_Running'], + host, status['Slave_IO_Running'], status['Slave_SQL_Running'], status['Last_Errno'], status['Last_Error']) raise HandlerError(msg) else: - raise HandlerError('Cannot change replication master to %s' % (host)) + raise HandlerError('Cannot change replication main to %s' % (host)) - LOG.debug('Replication master is changed to host %s', host) + LOG.debug('Replication main is changed to host %s', host) diff --git a/src/scalarizr/handlers/mysqlproxy.py b/src/scalarizr/handlers/mysqlproxy.py index 9948728..5801a4f 100755 --- a/src/scalarizr/handlers/mysqlproxy.py +++ b/src/scalarizr/handlers/mysqlproxy.py @@ -22,7 +22,7 @@ BEHAVIOUR = SERVICE_NAME = 'mysql_proxy' CONFIG_FILE_PATH = '/etc/mysql_proxy.conf' PID_FILE = '/var/run/mysql-proxy.pid' -NEW_MASTER_UP = "Mysql_NewMasterUp" +NEW_MASTER_UP = "Mysql_NewMasterUp" LOG_FILE = '/var/log/mysql-proxy.log' LOG = logging.getLogger(__name__) @@ -192,8 +192,8 @@ def _reload_backends(self): queryenv = bus.queryenv_service roles = queryenv.list_roles() - master = None - slaves = [] + main = None + subordinates = [] for role in roles: if not is_mysql_role(role.behaviour): @@ -201,18 +201,18 @@ def _reload_backends(self): for host in role.hosts: ip = host.internal_ip or host.external_ip - if host.replication_master: - master = ip + if host.replication_main: + main = ip else: - slaves.append(ip) + subordinates.append(ip) - if master: - self._logger.debug('Adding mysql master %s to mysql-proxy defaults file', master) - self.config.add('./mysql-proxy/proxy-backend-addresses', '%s:3306' % master) - if slaves: - self._logger.debug('Adding mysql slaves to mysql-proxy defaults 
file: %s', ', '.join(slaves)) - for slave in slaves: - self.config.add('./mysql-proxy/proxy-read-only-backend-addresses', '%s:3306' % slave) + if main: + self._logger.debug('Adding mysql main %s to mysql-proxy defaults file', main) + self.config.add('./mysql-proxy/proxy-backend-addresses', '%s:3306' % main) + if subordinates: + self._logger.debug('Adding mysql subordinates to mysql-proxy defaults file: %s', ', '.join(subordinates)) + for subordinate in subordinates: + self.config.add('./mysql-proxy/proxy-read-only-backend-addresses', '%s:3306' % subordinate) self.config.set('./mysql-proxy/pid-file', PID_FILE, force=True) self.config.set('./mysql-proxy/daemon', 'true', force=True) @@ -230,4 +230,4 @@ def _reload_backends(self): def on_HostUp(self, message): self._reload_backends() - on_DbMsr_NewMasterUp = on_Mysql_NewMasterUp = on_HostDown = on_HostUp + on_DbMsr_NewMasterUp = on_Mysql_NewMasterUp = on_HostDown = on_HostUp diff --git a/src/scalarizr/handlers/postgresql.py b/src/scalarizr/handlers/postgresql.py index 5ddafb3..b51120e 100755 --- a/src/scalarizr/handlers/postgresql.py +++ b/src/scalarizr/handlers/postgresql.py @@ -114,15 +114,15 @@ def __init__(self): 'postgresql_data_bundle', - # @param host: New master hostname - 'before_postgresql_change_master', + # @param host: New main hostname + 'before_postgresql_change_main', - # @param host: New master hostname - 'postgresql_change_master', + # @param host: New main hostname + 'postgresql_change_main', - 'before_slave_promote_to_master', + 'before_subordinate_promote_to_main', - 'slave_promote_to_master' + 'subordinate_promote_to_main' ) self._hir_volume_growth = None @@ -197,7 +197,7 @@ def selinux_enabled(): self._logger.warning("Scalr's root PgSQL user was changed. 
Recreating.") self.postgresql.root_user.change_system_password(root_password) - if self.is_replication_master: + if self.is_replication_main: #ALTER ROLE cannot be executed in a read-only transaction self._logger.debug("Checking password for pg_role scalr.") if not self.postgresql.root_user.check_role_password(root_password): @@ -214,8 +214,8 @@ def on_reload(self): def on_HostInit(self, message): if message.local_ip != self._platform.get_private_ip() and message.local_ip in self.pg_hosts: - LOG.debug('Got new slave IP: %s. Registering in pg_hba.conf' % message.local_ip) - self.postgresql.register_slave(message.local_ip) + LOG.debug('Got new subordinate IP: %s. Registering in pg_hba.conf' % message.local_ip) + self.postgresql.register_subordinate(message.local_ip) def on_HostUp(self, message): @@ -229,8 +229,8 @@ def on_HostUp(self, message): def on_HostDown(self, message): if message.local_ip != self._platform.get_private_ip(): self.postgresql.unregister_client(message.local_ip) - if self.is_replication_master and self.farmrole_id == message.farm_role_id: - self.postgresql.unregister_slave(message.local_ip) + if self.is_replication_main and self.farmrole_id == message.farm_role_id: + self.postgresql.unregister_subordinate(message.local_ip) @property def farm_hosts(self): @@ -285,7 +285,7 @@ def _tmp_path(self): @property - def is_replication_master(self): + def is_replication_main(self): return True if int(__postgresql__[OPT_REPLICATION_MASTER]) else False @@ -336,9 +336,9 @@ def on_host_init_response(self, message): # Compatibility transformation # - volume_config -> volume - # - master n'th start, type=ebs - del snapshot_config + # - main n'th start, type=ebs - del snapshot_config # - snapshot_config -> restore - # - create backup object on master 1'st start + # - create backup object on main 1'st start postgresql_data['compat_prior_backup_restore'] = True if postgresql_data.get(OPT_VOLUME_CNF): @@ -354,7 +354,7 @@ def on_host_init_response(self, message): if 
postgresql_data['volume'].device and \ postgresql_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid', 'gce_persistent'): - LOG.debug("Master n'th start detected. Removing snapshot config from message") + LOG.debug("Main n'th start detected. Removing snapshot config from message") postgresql_data.pop(OPT_SNAPSHOT_CNF, None) if postgresql_data.get(OPT_SNAPSHOT_CNF): @@ -363,7 +363,7 @@ def on_host_init_response(self, message): snapshot=postgresql_data.pop(OPT_SNAPSHOT_CNF), volume=postgresql_data['volume']) - if int(postgresql_data['replication_master']): + if int(postgresql_data['replication_main']): postgresql_data['backup'] = backup.backup( type='snap_postgresql', volume=postgresql_data['volume']) @@ -382,13 +382,13 @@ def on_before_host_up(self, message): @param message: HostUp message """ - repl = 'master' if self.is_replication_master else 'slave' + repl = 'main' if self.is_replication_main else 'subordinate' #bus.fire('before_postgresql_configure', replication=repl) - if self.is_replication_master: - self._init_master(message) + if self.is_replication_main: + self._init_main(message) else: - self._init_slave(message) + self._init_subordinate(message) # Force to resave volume settings __postgresql__['volume'] = storage2.volume(__postgresql__['volume']) bus.fire('service_configured', service_name=SERVICE_NAME, replication=repl, preset=self.initial_preset) @@ -406,7 +406,7 @@ def on_BeforeHostTerminate(self, message): if message.local_ip == self._platform.get_private_ip(): LOG.info('Stopping %s service' % BEHAVIOUR) self.postgresql.service.stop('Server will be terminated') - if not self.is_replication_master: + if not self.is_replication_main: LOG.info('Destroying volume %s' % __postgresql__['volume'].id) __postgresql__['volume'].destroy(remove_disks=True) LOG.info('Volume %s has been destroyed.' 
% __postgresql__['volume'].id) @@ -419,21 +419,21 @@ def on_DbMsr_CreateDataBundle(self, message): self._postgresql_api.create_databundle(async=True) - def on_DbMsr_PromoteToMaster(self, message): + def on_DbMsr_PromoteToMain(self, message): """ - Promote slave to master + Promote subordinate to main @type message: scalarizr.messaging.Message - @param message: postgresql_PromoteToMaster + @param message: postgresql_PromoteToMain """ - LOG.debug("on_DbMsr_PromoteToMaster") + LOG.debug("on_DbMsr_PromoteToMain") postgresql = message.body[BEHAVIOUR] - if int(__postgresql__['replication_master']): - LOG.warning('Cannot promote to master. Already master') + if int(__postgresql__['replication_main']): + LOG.warning('Cannot promote to main. Already main') return - LOG.info('Starting Slave -> Master promotion') - bus.fire('before_slave_promote_to_master') + LOG.info('Starting Subordinate -> Main promotion') + bus.fire('before_subordinate_promote_to_main') msg_data = { 'db_type' : BEHAVIOUR, @@ -451,7 +451,7 @@ def on_DbMsr_PromoteToMaster(self, message): self.postgresql.stop_replication() if new_vol and new_vol.type not in ('eph', 'lvm'): - self.postgresql.service.stop('Unplugging slave storage and then plugging master one') + self.postgresql.service.stop('Unplugging subordinate storage and then plugging main one') old_vol = storage2.volume(__postgresql__['volume']) old_vol.detach(force=True) @@ -465,8 +465,8 @@ def on_DbMsr_PromoteToMaster(self, message): __postgresql__['volume'] = new_vol msg_data[BEHAVIOUR] = {'volume_config': dict(new_vol)} - slaves = [host.internal_ip for host in self._get_slave_hosts()] - self.postgresql.init_master(STORAGE_PATH, self.root_password, slaves) + subordinates = [host.internal_ip for host in self._get_subordinate_hosts()] + self.postgresql.init_main(STORAGE_PATH, self.root_password, subordinates) self.postgresql.start_replication() __postgresql__[OPT_REPLICATION_MASTER] = 1 @@ -480,7 +480,7 @@ def on_DbMsr_PromoteToMaster(self, message): 
self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data) tx_complete = True - bus.fire('slave_promote_to_master') + bus.fire('subordinate_promote_to_main') except (Exception, BaseException), e: LOG.exception(e) @@ -494,21 +494,21 @@ def on_DbMsr_PromoteToMaster(self, message): self.postgresql.service.stop('Unplugging broken storage and then plugging the old one') if new_vol: new_vol.detach() - # Get back slave storage + # Get back subordinate storage if old_vol: old_vol.ensure(mount=True) self.postgresql.service.start() if tx_complete and new_vol and new_vol.type not in ('eph', 'lvm'): - # Delete slave EBS + # Delete subordinate EBS old_vol.destroy(remove_disks=True) - def on_DbMsr_NewMasterUp(self, message): + def on_DbMsr_NewMainUp(self, message): """ - Switch replication to a new master server + Switch replication to a new main server @type message: scalarizr.messaging.Message - @param message: DbMsr_NewMasterUp + @param message: DbMsr_NewMainUp """ try: assert message.body.has_key("db_type") @@ -518,13 +518,13 @@ def on_DbMsr_NewMasterUp(self, message): postgresql_data = message.body[BEHAVIOUR] - if int(__postgresql__['replication_master']): - LOG.debug('Skip NewMasterUp. My replication role is master') + if int(__postgresql__['replication_main']): + LOG.debug('Skip NewMainUp. 
My replication role is main') return host = message.local_ip or message.remote_ip - LOG.info("Switching replication to a new PostgreSQL master %s", host) - bus.fire('before_postgresql_change_master', host=host) + LOG.info("Switching replication to a new PostgreSQL main %s", host) + bus.fire('before_postgresql_change_main', host=host) LOG.debug("__postgresql__['volume']: %s", __postgresql__['volume']) @@ -538,17 +538,17 @@ def on_DbMsr_NewMasterUp(self, message): snapshot=postgresql_data[OPT_SNAPSHOT_CNF]) if __postgresql__['volume'].type == 'eph': - self.postgresql.service.stop('Swapping storages to reinitialize slave') + self.postgresql.service.stop('Swapping storages to reinitialize subordinate') - LOG.info('Reinitializing Slave from the new snapshot %s', + LOG.info('Reinitializing Subordinate from the new snapshot %s', restore.snapshot['id']) new_vol = restore.run() #self.postgresql.service.start() - self.postgresql.init_slave(STORAGE_PATH, host, __postgresql__['port'], self.root_password) + self.postgresql.init_subordinate(STORAGE_PATH, host, __postgresql__['port'], self.root_password) LOG.debug("Replication switched") - bus.fire('postgresql_change_master', host=host) + bus.fire('postgresql_change_main', host=host) msg_data = dict( db_type = BEHAVIOUR, @@ -573,14 +573,14 @@ def on_DbMsr_CreateBackup(self, message): self._postgresql_api.create_backup(async=True) - def _init_master(self, message): + def _init_main(self, message): """ - Initialize postgresql master + Initialize postgresql main @type message: scalarizr.messaging.Message @param message: HostUp message """ log = bus.init_op.logger - log.info("Initializing PostgreSQL master") + log.info("Initializing PostgreSQL main") log.info('Create storage') @@ -604,8 +604,8 @@ def _init_master(self, message): __postgresql__['volume'].ensure(mount=True, mkfs=True) LOG.debug('Postgres volume config after ensure: %s', dict(__postgresql__['volume'])) - log.info('Initialize Master') - 
self.postgresql.init_master(mpoint=STORAGE_PATH, password=self.root_password) + log.info('Initialize Main') + self.postgresql.init_main(mpoint=STORAGE_PATH, password=self.root_password) if self.postgresql.first_start and 'backup' in __postgresql__: log.info('Create data bundle') @@ -615,7 +615,7 @@ def _init_master(self, message): log.info('Collect HostUp data') # Update HostUp message - msg_data = dict({OPT_REPLICATION_MASTER: str(int(self.is_replication_master)), + msg_data = dict({OPT_REPLICATION_MASTER: str(int(self.is_replication_main)), OPT_ROOT_USER: self.postgresql.root_user.name, OPT_ROOT_PASSWORD: self.root_password, OPT_ROOT_SSH_PRIVATE_KEY: self.postgresql.root_user.private_key, @@ -650,51 +650,51 @@ def _init_master(self, message): __postgresql__.update(msg_data) - def _get_master_host(self): - master_host = None - LOG.info("Requesting master server") - while not master_host: + def _get_main_host(self): + main_host = None + LOG.info("Requesting main server") + while not main_host: try: - master_host = list(host + main_host = list(host for host in self._queryenv.list_roles(behaviour=BEHAVIOUR)[0].hosts - if host.replication_master)[0] + if host.replication_main)[0] except IndexError: - LOG.debug("QueryEnv respond with no postgresql master. " + + LOG.debug("QueryEnv respond with no postgresql main. 
" + "Waiting %d seconds before the next attempt", 5) time.sleep(5) - return master_host + return main_host - def _get_slave_hosts(self): + def _get_subordinate_hosts(self): LOG.info("Requesting standby servers") return list(host for host in self._queryenv.list_roles(behaviour=BEHAVIOUR)[0].hosts - if not host.replication_master) + if not host.replication_main) - def _init_slave(self, message): + def _init_subordinate(self, message): """ - Initialize postgresql slave + Initialize postgresql subordinate @type message: scalarizr.messaging.Message @param message: HostUp message """ log = bus.init_op.logger - log.info("Initializing PostgreSQL slave") + log.info("Initializing PostgreSQL subordinate") log.info('Create storage') - LOG.debug("Initialize slave storage") + LOG.debug("Initialize subordinate storage") if 'restore' in __postgresql__ and\ __postgresql__['restore'].type == 'snap_postgresql': __postgresql__['restore'].run() else: __postgresql__['volume'].ensure(mount=True, mkfs=True) - log.info('Initialize Slave') - # Change replication master - master_host = self._get_master_host() + log.info('Initialize Subordinate') + # Change replication main + main_host = self._get_main_host() - LOG.debug("Master server obtained (local_ip: %s, public_ip: %s)", - master_host.internal_ip, master_host.external_ip) + LOG.debug("Main server obtained (local_ip: %s, public_ip: %s)", + main_host.internal_ip, main_host.external_ip) - host = master_host.internal_ip or master_host.external_ip - self.postgresql.init_slave(STORAGE_PATH, host, __postgresql__['port'], self.root_password) + host = main_host.internal_ip or main_host.external_ip + self.postgresql.init_subordinate(STORAGE_PATH, host, __postgresql__['port'], self.root_password) log.info('Collect HostUp data') # Update HostUp message diff --git a/src/scalarizr/handlers/rabbitmq.py b/src/scalarizr/handlers/rabbitmq.py index 1ccacc7..6a5753b 100755 --- a/src/scalarizr/handlers/rabbitmq.py +++ b/src/scalarizr/handlers/rabbitmq.py @@ 
-360,8 +360,8 @@ def on_before_host_up(self, message): scalr_user_password = __rabbitmq__['password'] self.rabbitmq.check_scalr_user(scalr_user_password) - master_user_password = __rabbitmq__['password'] - self.rabbitmq.check_master_user(master_user_password) + main_user_password = __rabbitmq__['password'] + self.rabbitmq.check_main_user(main_user_password) cluster_nodes = self.rabbitmq.cluster_nodes() if not all([node in cluster_nodes for node in nodes_to_cluster_with]): diff --git a/src/scalarizr/handlers/redis.py b/src/scalarizr/handlers/redis.py index 42005d8..6e1eca2 100755 --- a/src/scalarizr/handlers/redis.py +++ b/src/scalarizr/handlers/redis.py @@ -70,8 +70,8 @@ class RedisHandler(ServiceCtlHandler, handlers.FarmSecurityMixin): redis_instances = None @property - def is_replication_master(self): - return __redis__["replication_master"] + def is_replication_main(self): + return __redis__["replication_main"] @property @@ -104,10 +104,10 @@ def get_initialization_phases(self, hir_message): if BEHAVIOUR in hir_message.body: steps = [self._step_accept_scalr_conf, self._step_create_storage] - if hir_message.body[BEHAVIOUR]['replication_master'] == '1': - steps += [self._step_init_master] + if hir_message.body[BEHAVIOUR]['replication_main'] == '1': + steps += [self._step_init_main] else: - steps += [self._step_init_slave] + steps += [self._step_init_subordinate] steps += [self._step_collect_host_up_data] return {'before_host_up': [{ @@ -132,15 +132,15 @@ def __init__(self): '%s_data_bundle' % BEHAVIOUR, - # @param host: New master hostname - 'before_%s_change_master' % BEHAVIOUR, + # @param host: New main hostname + 'before_%s_change_main' % BEHAVIOUR, - # @param host: New master hostname - '%s_change_master' % BEHAVIOUR, + # @param host: New main hostname + '%s_change_main' % BEHAVIOUR, - 'before_slave_promote_to_master', + 'before_subordinate_promote_to_main', - 'slave_promote_to_master' + 'subordinate_promote_to_main' ) self._phase_redis = 'Configure Redis' @@ 
-151,10 +151,10 @@ def __init__(self): self._step_accept_scalr_conf = 'Accept Scalr configuration' self._step_patch_conf = 'Patch configuration files' self._step_create_storage = 'Create storage' - self._step_init_master = 'Initialize Master' - self._step_init_slave = 'Initialize Slave' + self._step_init_main = 'Initialize Main' + self._step_init_subordinate = 'Initialize Subordinate' self._step_create_data_bundle = 'Create data bundle' - self._step_change_replication_master = 'Change replication Master' + self._step_change_replication_main = 'Change replication Main' self._step_collect_host_up_data = 'Collect HostUp data' self.on_reload() @@ -346,13 +346,13 @@ def on_before_host_up(self, message): @param message: HostUp message """ - repl = 'master' if self.is_replication_master else 'slave' + repl = 'main' if self.is_replication_main else 'subordinate' message.redis = {} - if self.is_replication_master: - self._init_master(message) + if self.is_replication_main: + self._init_main(message) else: - self._init_slave(message) + self._init_subordinate(message) __redis__['volume'] = storage2.volume(__redis__['volume']) @@ -381,7 +381,7 @@ def on_BeforeHostTerminate(self, message): self.redis_instances.save_all() LOG.info('Stopping %s service' % BEHAVIOUR) self.redis_instances.stop('Server will be terminated') - if not self.is_replication_master: + if not self.is_replication_main: LOG.info('Destroying volume %s' % __redis__['volume'].id) __redis__['volume'].destroy(remove_disks=True) LOG.info('Volume %s was destroyed.' 
% __redis__['volume'].id) @@ -393,23 +393,23 @@ def on_DbMsr_CreateDataBundle(self, message): self._redis_api.create_databundle() - def on_DbMsr_PromoteToMaster(self, message): + def on_DbMsr_PromoteToMain(self, message): """ - Promote slave to master + Promote subordinate to main @type message: scalarizr.messaging.Message - @param message: redis_PromoteToMaster + @param message: redis_PromoteToMain """ if message.db_type != BEHAVIOUR: - LOG.error('Wrong db_type in DbMsr_PromoteToMaster message: %s' % message.db_type) + LOG.error('Wrong db_type in DbMsr_PromoteToMain message: %s' % message.db_type) return - if self.is_replication_master: - LOG.warning('Cannot promote to master. Already master') + if self.is_replication_main: + LOG.warning('Cannot promote to main. Already main') return - bus.fire('before_slave_promote_to_master') + bus.fire('before_subordinate_promote_to_main') - master_storage_conf = message.body.get('volume_config') + main_storage_conf = message.body.get('volume_config') tx_complete = False old_vol = None new_storage_vol = None @@ -420,29 +420,29 @@ def on_DbMsr_PromoteToMaster(self, message): ) try: - if master_storage_conf and master_storage_conf['type'] != 'eph': + if main_storage_conf and main_storage_conf['type'] != 'eph': - self.redis_instances.stop('Unplugging slave storage and then plugging master one') + self.redis_instances.stop('Unplugging subordinate storage and then plugging main one') old_vol = storage2.volume(__redis__['volume']) old_vol.detach(force=True) - new_storage_vol = storage2.volume(master_storage_conf) + new_storage_vol = storage2.volume(main_storage_conf) new_storage_vol.ensure(mount=True) __redis__['volume'] = new_storage_vol - self.redis_instances.init_as_masters(self._storage_path) - __redis__['replication_master'] = 1 + self.redis_instances.init_as_mains(self._storage_path) + __redis__['replication_main'] = 1 msg_data[BEHAVIOUR] = {'volume_config': dict(__redis__['volume'])} 
self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data) tx_complete = True - bus.fire('slave_promote_to_master') + bus.fire('subordinate_promote_to_main') except (Exception, BaseException), e: LOG.exception(e) if new_storage_vol and not new_storage_vol.detached: new_storage_vol.detach(force=True) - # Get back slave storage + # Get back subordinate storage if old_vol: old_vol.ensure(mount=True) __redis__['volume'] = old_vol @@ -457,48 +457,48 @@ def on_DbMsr_PromoteToMaster(self, message): self.redis_instances.start() if tx_complete and old_vol is not None: - # Delete slave EBS + # Delete subordinate EBS old_vol.destroy(remove_disks=True) - def on_DbMsr_NewMasterUp(self, message): + def on_DbMsr_NewMainUp(self, message): """ - Switch replication to a new master server + Switch replication to a new main server @type message: scalarizr.messaging.Message - @param message: DbMsr__NewMasterUp + @param message: DbMsr__NewMainUp """ if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR: - raise HandlerError("DbMsr_NewMasterUp message for %s behaviour must have '%s' property and db_type '%s'" % + raise HandlerError("DbMsr_NewMainUp message for %s behaviour must have '%s' property and db_type '%s'" % BEHAVIOUR, BEHAVIOUR, BEHAVIOUR) - if self.is_replication_master: - LOG.debug('Skipping NewMasterUp. My replication role is master') + if self.is_replication_main: + LOG.debug('Skipping NewMainUp. 
My replication role is main') return host = message.local_ip or message.remote_ip - LOG.info("Switching replication to a new %s master %s"% (BEHAVIOUR, host)) - bus.fire('before_%s_change_master' % BEHAVIOUR, host=host) + LOG.info("Switching replication to a new %s main %s"% (BEHAVIOUR, host)) + bus.fire('before_%s_change_main' % BEHAVIOUR, host=host) - self.redis_instances.init_as_slaves(self._storage_path, host) + self.redis_instances.init_as_subordinates(self._storage_path, host) self.redis_instances.wait_for_sync() LOG.debug("Replication switched") - bus.fire('%s_change_master' % BEHAVIOUR, host=host) + bus.fire('%s_change_main' % BEHAVIOUR, host=host) def on_DbMsr_CreateBackup(self, message): self._redis_api.create_backup() - def _init_master(self, message): + def _init_main(self, message): """ - Initialize redis master + Initialize redis main @type message: scalarizr.messaging.Message @param message: HostUp message """ log = bus.init_op.logger - log.info("Initializing %s master" % BEHAVIOUR) + log.info("Initializing %s main" % BEHAVIOUR) log.info('Create storage') # Plug storage @@ -520,15 +520,15 @@ def _init_master(self, message): __redis__['volume'].ensure(mount=True, mkfs=True) LOG.debug('Redis volume config after ensure: %s', dict(__redis__['volume'])) - log.info('Initialize Master') + log.info('Initialize Main') password = self.get_main_password() - self.redis_instances.init_as_masters(mpoint=self._storage_path) + self.redis_instances.init_as_mains(mpoint=self._storage_path) msg_data = dict() msg_data.update({ - "replication_master": '1', - "master_password": password, + "replication_main": '1', + "main_password": password, }) log.info('Collect HostUp data') @@ -548,52 +548,52 @@ def use_passwords(self): def get_main_password(self): - password = __redis__["master_password"] + password = __redis__["main_password"] if self.use_passwords and not password: password = cryptotool.pwgen(20) - __redis__["master_password"] = password + __redis__["main_password"] 
= password return password - def _get_master_host(self): - master_host = None - LOG.info("Requesting master server") - while not master_host: + def _get_main_host(self): + main_host = None + LOG.info("Requesting main server") + while not main_host: try: - master_host = list(host + main_host = list(host for host in self._queryenv.list_roles(behaviour=BEHAVIOUR)[0].hosts - if host.replication_master)[0] + if host.replication_main)[0] except IndexError: - LOG.debug("QueryEnv respond with no %s master. " % BEHAVIOUR + + LOG.debug("QueryEnv respond with no %s main. " % BEHAVIOUR + "Waiting %d seconds before the next attempt" % 5) time.sleep(5) - return master_host + return main_host - def _init_slave(self, message): + def _init_subordinate(self, message): """ - Initialize redis slave + Initialize redis subordinate @type message: scalarizr.messaging.Message @param message: HostUp message """ log = bus.init_op.logger - log.info("Initializing %s slave" % BEHAVIOUR) + log.info("Initializing %s subordinate" % BEHAVIOUR) log.info('Create storage') - LOG.debug("Initializing slave storage") + LOG.debug("Initializing subordinate storage") __redis__['volume'].ensure(mount=True, mkfs=True) - log.info('Initialize Slave') - # Change replication master - master_host = self._get_master_host() + log.info('Initialize Subordinate') + # Change replication main + main_host = self._get_main_host() - LOG.debug("Master server obtained (local_ip: %s, public_ip: %s)", - master_host.internal_ip, master_host.external_ip) + LOG.debug("Main server obtained (local_ip: %s, public_ip: %s)", + main_host.internal_ip, main_host.external_ip) - host = master_host.internal_ip or master_host.external_ip - self.redis_instances.init_as_slaves(self._storage_path, host) + host = main_host.internal_ip or main_host.external_ip + self.redis_instances.init_as_subordinates(self._storage_path, host) self.redis_instances.wait_for_sync() log.info('Collect HostUp data') @@ -625,5 +625,5 @@ def get_main_password(self): 
def _after_apply_preset(self): - cli = redis.RedisCLI(__redis__["master_password"]) + cli = redis.RedisCLI(__redis__["main_password"]) cli.bgsave() diff --git a/src/scalarizr/libs/metaconf/__init__.py b/src/scalarizr/libs/metaconf/__init__.py index 858ef6c..324c829 100755 --- a/src/scalarizr/libs/metaconf/__init__.py +++ b/src/scalarizr/libs/metaconf/__init__.py @@ -737,12 +737,12 @@ def _parse(self, iterable): conf = Configuration("ini") bhs = conf.get_list("general/behaviour") platform = conf.get("general/platform") -conf.set("handler_mysql/replication_master", 1, bool) +conf.set("handler_mysql/replication_main", 1, bool) # Access sections sect = conf.subset("handler_mysql") # 1 way sect = conf["handler_mysql"] # 2 shorter way -sect.set("replication_master", 1, bool) +sect.set("replication_main", 1, bool) class XmlFormatProvider: diff --git a/src/scalarizr/node.py b/src/scalarizr/node.py index aacf1e0..a7be841 100755 --- a/src/scalarizr/node.py +++ b/src/scalarizr/node.py @@ -181,7 +181,7 @@ class RedisIni(Ini): def __getitem__(self, key): try: value = super(RedisIni, self).__getitem__(key) - if key in ('use_password', 'replication_master',): + if key in ('use_password', 'replication_main',): if value in (None, ''): value = True else: @@ -190,7 +190,7 @@ def __getitem__(self, key): if 'persistence_type' == key: value = 'snapshotting' self.__setitem__(key, value) - elif 'master_password' == key: + elif 'main_password' == key: value = None else: raise @@ -376,7 +376,7 @@ class ScalrVersion(Store): 'volume,volume_config': Json('%s/storage/%s.json' % (private_dir, 'mysql'), 'scalarizr.storage2.volume'), - 'root_password,repl_password,stat_password,log_file,log_pos,replication_master': + 'root_password,repl_password,stat_password,log_file,log_pos,replication_main': Ini('%s/%s.ini' % (private_dir, behavior), section), 'mysqldump_options': Ini('%s/%s.ini' % (public_dir, behavior), section) @@ -385,7 +385,7 @@ class ScalrVersion(Store): node['redis'] = Compound({ 
'volume,volume_config': Json( '%s/storage/%s.json' % (private_dir, 'redis'), 'scalarizr.storage2.volume'), - 'replication_master,persistence_type,use_password,master_password': RedisIni( + 'replication_main,persistence_type,use_password,main_password': RedisIni( '%s/%s.ini' % (private_dir, 'redis'), 'redis') }) @@ -401,7 +401,7 @@ class ScalrVersion(Store): node['postgresql'] = Compound({ 'volume,volume_config': Json('%s/storage/%s.json' % (private_dir, 'postgresql'), 'scalarizr.storage2.volume'), -'replication_master,pg_version,scalr_password,root_password, root_user': Ini( +'replication_main,pg_version,scalr_password,root_password, root_user': Ini( '%s/%s.ini' % (private_dir, 'postgresql'), 'postgresql') }) diff --git a/src/scalarizr/queryenv.py b/src/scalarizr/queryenv.py index 8b2bca8..99ad978 100755 --- a/src/scalarizr/queryenv.py +++ b/src/scalarizr/queryenv.py @@ -556,7 +556,7 @@ def from_dict(cls, dict_data): class RoleHost(QueryEnvResult): index = None - replication_master = False + replication_main = False internal_ip = None external_ip = None shard_index = None @@ -564,7 +564,7 @@ class RoleHost(QueryEnvResult): status = None cloud_location = None - def __init__(self, index=None, replication_master=False, internal_ip=None, external_ip=None, + def __init__(self, index=None, replication_main=False, internal_ip=None, external_ip=None, shard_index=None, replica_set_index=None, status=None, cloud_location=None): self.internal_ip = internal_ip self.external_ip = external_ip @@ -572,8 +572,8 @@ def __init__(self, index=None, replication_master=False, internal_ip=None, exter self.cloud_location = cloud_location if index: self.index = int(index) - if replication_master: - self.replication_master = bool(int(replication_master)) + if replication_main: + self.replication_main = bool(int(replication_main)) if shard_index: self.shard_index = int(shard_index) if replica_set_index: @@ -581,7 +581,7 @@ def __init__(self, index=None, replication_master=False, 
internal_ip=None, exter def __repr__(self): return "index = " + str(self.index) \ - + "; replication_master = " + str(self.replication_master) \ + + "; replication_main = " + str(self.replication_main) \ + "; internal_ip = " + str(self.internal_ip) \ + "; external_ip = " + str(self.external_ip) \ + "; shard_index = " + str(self.shard_index) \ diff --git a/src/scalarizr/services/backup.py b/src/scalarizr/services/backup.py index 821cef2..7794d67 100755 --- a/src/scalarizr/services/backup.py +++ b/src/scalarizr/services/backup.py @@ -53,7 +53,7 @@ def restore(*args, **kwds): class Backup(bases.Task): features = { - 'start_slave': True + 'start_subordinate': True } def __init__(self, @@ -71,13 +71,13 @@ def __init__(self, class Restore(bases.Task): features = { - 'master_binlog_reset': False + 'main_binlog_reset': False } ''' - When 'master_binlog_reset' = False, - rolling this restore on Master causes replication binary log reset. - Slaves should start from the binary log head. Detecting the first - position in binary log is implementation dependent and Master is + When 'main_binlog_reset' = False, + rolling this restore on Main causes replication binary log reset. + Subordinates should start from the binary log head. Detecting the first + position in binary log is implementation dependent and Main is responsible for this. 
''' diff --git a/src/scalarizr/services/mongodb.py b/src/scalarizr/services/mongodb.py index f3243d2..95ea3e0 100755 --- a/src/scalarizr/services/mongodb.py +++ b/src/scalarizr/services/mongodb.py @@ -135,9 +135,9 @@ def __new__(cls, *args, **kwargs): @property - def is_replication_master(self): - res = self.cli.is_master()['ismaster'] - self._logger.debug("Replication master: %s", res) + def is_replication_main(self): + res = self.cli.is_main()['ismain'] + self._logger.debug("Replication main: %s", res) return res @@ -198,9 +198,9 @@ def initiate_rs(self): @return (host:port) ''' self.cli.initiate_rs() - wait_until(lambda: self.is_replication_master, sleep=5, logger=self._logger, + wait_until(lambda: self.is_replication_main, sleep=5, logger=self._logger, timeout=120, start_text='Wait until node becomes replication primary') - self._logger.debug('Server became replication master') + self._logger.debug('Server became replication main') def start_shardsvr(self): @@ -268,7 +268,7 @@ def stop_default_init_script(self): self.default_init_script.stop('Stopping default mongod service') - def register_slave(self, ip, port=None): + def register_subordinate(self, ip, port=None): ret = self.cli.add_replica(ip, port) if ret['ok'] == '0': self._logger.error('Could not add replica %s to set: %s' % (ip, ret['errmsg'])) @@ -280,8 +280,8 @@ def register_arbiter(self,ip,port=None): self._logger.error('Could not add arbiter %s to set: %s' % (ip, ret['errmsg'])) - def unregister_slave(self,ip,port=None): - ret = self.cli.remove_slave(ip, port) + def unregister_subordinate(self,ip,port=None): + ret = self.cli.remove_subordinate(ip, port) if ret['ok'] == '0': self._logger.error('Could not remove replica %s from set: %s' % (ip, ret['errmsg'])) @@ -324,7 +324,7 @@ def status(self): @property def replicas(self): self._logger.debug('Querying list of replicas') - ret = self.cli.is_master() + ret = self.cli.is_main() rep_list = ret['hosts'] if 'hosts' in ret else [] 
self._logger.debug('Current replicas are %s' % rep_list) return rep_list @@ -333,7 +333,7 @@ def replicas(self): @property def arbiters(self): self._logger.debug('Querying list of arbiters') - ret = self.cli.is_master() + ret = self.cli.is_main() arbiter_list = ret['arbiters'] if 'arbiters' in ret else [] self._logger.debug('Current arbiters are %s' % arbiter_list) return arbiter_list @@ -341,7 +341,7 @@ def arbiters(self): @property def primary_host(self): self._logger.debug('Getting current primary host') - ret = self.cli.is_master() + ret = self.cli.is_main() return ret['primary'] if 'primary' in ret else None @@ -990,9 +990,9 @@ def add_replica(self, ip, port=None, arbiter=False): @autoreconnect - def is_master(self): - self._logger.debug('Checking if node is master') - return self.connection.admin.command('isMaster') + def is_main(self): + self._logger.debug('Checking if node is main') + return self.connection.admin.command('isMain') @autoreconnect @@ -1040,7 +1040,7 @@ def add_arbiter(self,ip, port=None): return self.add_replica(ip, port, arbiter=True) - def remove_slave(self, ip, port=None): + def remove_subordinate(self, ip, port=None): port = port or REPLICA_DEFAULT_PORT host_to_del = "%s:%s" % (ip, port) diff --git a/src/scalarizr/services/mysql.py b/src/scalarizr/services/mysql.py index 97bc6c2..542c0e3 100755 --- a/src/scalarizr/services/mysql.py +++ b/src/scalarizr/services/mysql.py @@ -80,29 +80,29 @@ def __init__(self): fp.close() - def _init_replication(self, master=True): + def _init_replication(self, main=True): LOG.info('Initializing replication') - server_id = 1 if master else int(random.random() * 100000)+1 + server_id = 1 if main else int(random.random() * 100000)+1 self.my_cnf.server_id = server_id self.my_cnf.delete_options(['mysqld/bind-address', 'mysqld/skip-networking']) - def init_master(self): + def init_main(self): pass - def init_slave(self): + def init_subordinate(self): pass def _init_service(self): pass - def 
change_master_to(self): - # client.change_master_to + def change_main_to(self): + # client.change_main_to # check_replication_health and wait pass def check_replication_health(self): - # set slave status + # set subordinate status # on fail get status from error.log pass @@ -158,7 +158,7 @@ def flush_logs(self, data_dir): if not os.path.exists(data_dir): return - info_files = ['relay-log.info', 'master.info'] + info_files = ['relay-log.info', 'main.info'] files = os.listdir(data_dir) for file in files: @@ -212,23 +212,23 @@ def list_databases(self): return databases - def start_slave(self): + def start_subordinate(self): return self.fetchone('START SLAVE') - def stop_slave(self): + def stop_subordinate(self): return self.fetchone("STOP SLAVE") - def reset_slave(self): + def reset_subordinate(self): return self.fetchone("RESET SLAVE") - def stop_slave_io_thread(self): + def stop_subordinate_io_thread(self): return self.fetchone("STOP SLAVE IO_THREAD") - def start_slave_io_thread(self): + def start_subordinate_io_thread(self): return self.fetchone("START SLAVE IO_THREAD") @@ -278,7 +278,7 @@ def set_user_password(self, username, host, password): def flush_privileges(self): return self.fetchone("FLUSH PRIVILEGES") - def change_master_to(self, host, user, password, log_file, log_pos): + def change_main_to(self, host, user, password, log_file, log_pos): return self.fetchone('CHANGE MASTER TO MASTER_HOST="%(host)s", \ MASTER_USER="%(user)s", \ MASTER_PASSWORD="%(password)s", \ @@ -287,16 +287,16 @@ def change_master_to(self, host, user, password, log_file, log_pos): MASTER_CONNECT_RETRY=15;' % vars()) - def slave_status(self): + def subordinate_status(self): ret = self.fetchdict("SHOW SLAVE STATUS") - LOG.debug('slave status: %s' % str(ret)) + LOG.debug('subordinate status: %s' % str(ret)) if ret: return ret else: - raise ServiceError('SHOW SLAVE STATUS returned empty set. Slave is not started?') + raise ServiceError('SHOW SLAVE STATUS returned empty set. 
Subordinate is not started?') - def master_status(self): + def main_status(self): out = self.fetchdict('SHOW MASTER STATUS') log_file, log_pos = None, None if out: @@ -304,7 +304,7 @@ def master_status(self): return (log_file, log_pos) - def reset_master(self): + def reset_main(self): return self.fetchone("RESET MASTER") @@ -576,7 +576,7 @@ class RepicationWatcher(threading.Thread): _state = None _client = None - _master_host = None + _main_host = None _repl_user = None _repl_password = None @@ -586,15 +586,15 @@ class RepicationWatcher(threading.Thread): TIMEOUT = 60 - def __init__(self, client, master_host, repl_user, repl_password): + def __init__(self, client, main_host, repl_user, repl_password): super(RepicationWatcher, self).__init__() self._client = client - self.change_master_host(master_host, repl_user, repl_password) + self.change_main_host(main_host, repl_user, repl_password) - def change_master_host(self, host, user, password): + def change_main_host(self, host, user, password): self.suspend() - self._master_host = host + self._main_host = host self._repl_user = user self._repl_password = password self.resume() @@ -606,25 +606,25 @@ def start(self): if self._state == self.WATCHER_RUNNING: r_status = None try: - r_status = self._client.slave_status() + r_status = self._client.subordinate_status() except ServiceError, e: LOG.error(e) if not r_status: time.sleep(self.TIMEOUT) - elif r_status['Slave_IO_Running'] == 'Yes' and r_status['Slave_SQL_Running'] == 'Yes': + elif r_status['Subordinate_IO_Running'] == 'Yes' and r_status['Subordinate_SQL_Running'] == 'Yes': time.sleep(self.TIMEOUT) - elif r_status and r_status['Slave_SQL_Running'] == 'No' and \ + elif r_status and r_status['Subordinate_SQL_Running'] == 'No' and \ 'Relay log read failure: Could not parse relay log event entry' in r_status['Last_Error']: - self.repair_relaylog(r_status['Relay_Master_Log_File'], r_status['Exec_Master_Log_Pos']) + self.repair_relaylog(r_status['Relay_Main_Log_File'], 
r_status['Exec_Main_Log_Pos']) time.sleep(self.TIMEOUT) else: self.suspend() - msg = 'Replication is broken. Slave_IO_Running=%s, Slave_SQL_Running=%s, Last_Error=%s' % ( - r_status['Slave_IO_Running'], - r_status['Slave_SQL_Running'], + msg = 'Replication is broken. Subordinate_IO_Running=%s, Subordinate_SQL_Running=%s, Last_Error=%s' % ( + r_status['Subordinate_IO_Running'], + r_status['Subordinate_SQL_Running'], r_status['Last_Error'] ) LOG.error(msg) @@ -632,10 +632,10 @@ def start(self): def repair_relaylog(self, log_file, log_pos): LOG.info('Repairing relay log') try: - self._client.stop_slave() - self._client.reset_slave() - self._client.change_master_to(self._master_host, self._repl_user, self._repl_password, log_file, log_pos) - self._client.sart_slave() + self._client.stop_subordinate() + self._client.reset_subordinate() + self._client.change_main_to(self._main_host, self._repl_user, self._repl_password, log_file, log_pos) + self._client.sart_subordinate() except BaseException, e: self.suspend() LOG.error(e) diff --git a/src/scalarizr/services/mysql2.py b/src/scalarizr/services/mysql2.py index 57c744a..1618bbc 100755 --- a/src/scalarizr/services/mysql2.py +++ b/src/scalarizr/services/mysql2.py @@ -48,13 +48,13 @@ class Error(Exception): 'repl_user': 'scalr_repl', 'stat_user': 'scalr_stat', 'pma_user': 'pma', - 'master_user': 'scalr_master', - 'master_password': '', + 'main_user': 'scalr_main', + 'main_password': '', 'debian.cnf': '/etc/mysql/debian.cnf', 'my.cnf': '/etc/my.cnf' if linux.os['family'] in ('RedHat', 'Oracle') else '/etc/mysql/my.cnf', 'mysqldump_chunk_size': 200, - 'stop_slave_timeout': 180, - 'change_master_timeout': 60, + 'stop_subordinate_timeout': 180, + 'change_main_timeout': 60, 'defaults': { 'datadir': '/var/lib/mysql', 'log_bin': 'mysql_bin' @@ -81,12 +81,12 @@ def freeze(self, volume, state): client = self._client() client.lock_tables() coreutils.sync() - if int(__mysql__['replication_master']): - (log_file, log_pos) = 
client.master_status() + if int(__mysql__['replication_main']): + (log_file, log_pos) = client.main_status() else: - slave_status = client.slave_status() - log_pos = slave_status['Exec_Master_Log_Pos'] - log_file = slave_status['Master_Log_File'] + subordinate_status = client.subordinate_status() + log_pos = subordinate_status['Exec_Main_Log_Pos'] + log_file = subordinate_status['Main_Log_File'] upd = {'log_file': log_file, 'log_pos': log_pos} state.update(upd) @@ -153,8 +153,8 @@ def __init__(self, "\(for incremental\): '(\d+:\d+)'") self._re_binlog = re.compile(r"innobackupex: MySQL binlog position: " \ "filename '([^']+)', position (\d+)") - self._re_slave_binlog = re.compile(r"innobackupex: MySQL slave binlog position: " \ - "master host '[^']+', filename '([^']+)', position (\d+)") + self._re_subordinate_binlog = re.compile(r"innobackupex: MySQL subordinate binlog position: " \ + "main host '[^']+', filename '([^']+)', position (\d+)") self._re_lsn_innodb_stat = re.compile(r"Log sequence number \d+ (\d+)") self._killed = False @@ -176,9 +176,9 @@ def _run(self): } if self.no_lock: kwds['no_lock'] = True - if not int(__mysql__['replication_master']): - kwds['safe_slave_backup'] = True - kwds['slave_info'] = True + if not int(__mysql__['replication_main']): + kwds['safe_subordinate_backup'] = True + kwds['subordinate_info'] = True current_lsn = None if self.backup_type == 'auto': @@ -242,8 +242,8 @@ def _run(self): log_file = log_pos = to_lsn = None re_binlog = self._re_binlog \ - if int(__mysql__['replication_master']) else \ - self._re_slave_binlog + if int(__mysql__['replication_main']) else \ + self._re_subordinate_binlog for line in stderr.splitlines(): m = self._re_lsn.search(line) or self._re_lsn_51.search(line) if m: @@ -293,12 +293,12 @@ def _xbak_kill(self): LOG.debug("Killing process tree of pid %s" % self._xbak.pid) eradicate(self._xbak) - # sql-slave not running? run - if not int(__mysql__['replication_master']): + # sql-subordinate not running? 
run + if not int(__mysql__['replication_main']): try: - self._client().start_slave_io_thread() + self._client().start_subordinate_io_thread() except: - LOG.warning('Cannot start slave io thread', exc_info=sys.exc_info()) + LOG.warning('Cannot start subordinate io thread', exc_info=sys.exc_info()) class XtrabackupStreamRestore(XtrabackupMixin, backup.Restore): @@ -396,10 +396,10 @@ def _run(self): coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql') self._mysql_init.start() - if int(__mysql__['replication_master']): - LOG.info("Master will reset it's binary logs, " + if int(__mysql__['replication_main']): + LOG.info("Main will reset it's binary logs, " "so updating binary log position in backup manifest") - log_file, log_pos = self._client().master_status() + log_file, log_pos = self._client().main_status() meta = mnf.meta meta.update({'log_file': log_file, 'log_pos': log_pos}) mnf.meta = meta @@ -429,7 +429,7 @@ def __init__(self, chunk_size=chunk_size or __mysql__['mysqldump_chunk_size'], **kwds) self.features.update({ - 'start_slave': False + 'start_subordinate': False }) self.transfer = None self._popens = [] diff --git a/src/scalarizr/services/postgresql.py b/src/scalarizr/services/postgresql.py index c1503ce..80626b1 100755 --- a/src/scalarizr/services/postgresql.py +++ b/src/scalarizr/services/postgresql.py @@ -40,14 +40,14 @@ PG_DUMP = '/usr/bin/pg_dump' ROOT_USER = "scalr" -MASTER_USER = "scalr_master" +MASTER_USER = "scalr_main" DEFAULT_USER = "postgres" STORAGE_DATA_DIR = "data" TRIGGER_NAME = "trigger" PRESET_FNAME = 'postgresql.conf' OPT_PG_VERSION = 'pg_version' -OPT_REPLICATION_MASTER = "replication_master" +OPT_REPLICATION_MASTER = "replication_main" LOG = logging.getLogger(__name__) __postgresql__ = __node__[SERVICE_NAME] @@ -139,21 +139,21 @@ def unified_etc_path(self): return '/etc/postgresql/%s/main' % self.version if float(self.version) else '9.0' - def init_master(self, mpoint, password, slaves=None): + def init_main(self, mpoint, 
password, subordinates=None): self._init_service(mpoint, password) self.postgresql_conf.hot_standby = 'off' self.create_pg_role(ROOT_USER, password, super=True) self.create_pg_role(MASTER_USER, password, super=True, force=False) - if slaves: - LOG.debug('Registering slave hosts: %s' % ' '.join(slaves)) - for host in slaves: - self.register_slave(host, force_restart=False) + if subordinates: + LOG.debug('Registering subordinate hosts: %s' % ' '.join(subordinates)) + for host in subordinates: + self.register_subordinate(host, force_restart=False) self.service.start() - def init_slave(self, mpoint, primary_ip, primary_port, password): + def init_subordinate(self, mpoint, primary_ip, primary_port, password): self._init_service(mpoint, password) self.root_user.apply_public_ssh_key() @@ -164,7 +164,7 @@ def init_slave(self, mpoint, primary_ip, primary_port, password): trigger_path = os.path.join(self.config_dir.path, TRIGGER_NAME) if os.path.exists(trigger_path): - #in case master was rebundled with trigger enabled + #in case main was rebundled with trigger enabled os.remove(trigger_path) self.recovery_conf.trigger_file = trigger_path @@ -172,11 +172,11 @@ def init_slave(self, mpoint, primary_ip, primary_port, password): self.service.start() - def register_slave(self, slave_ip, force_restart=True): - self.pg_hba_conf.add_standby_host(slave_ip, self.root_user.name) + def register_subordinate(self, subordinate_ip, force_restart=True): + self.pg_hba_conf.add_standby_host(subordinate_ip, self.root_user.name) self.postgresql_conf.max_wal_senders += 1 if force_restart: - self.service.reload(reason='Registering slave', force=True) + self.service.reload(reason='Registering subordinate', force=True) def register_client(self, ip, force=True): @@ -188,9 +188,9 @@ def change_primary(self, primary_ip, primary_port, username): self.recovery_conf.primary_conninfo = (primary_ip, primary_port, username) - def unregister_slave(self, slave_ip): - 
self.pg_hba_conf.delete_standby_host(slave_ip, self.root_user.name) - self.service.reload(reason='Unregistering slave', force=True) + def unregister_subordinate(self, subordinate_ip): + self.pg_hba_conf.delete_standby_host(subordinate_ip, self.root_user.name) + self.service.reload(reason='Unregistering subordinate', force=True) def unregister_client(self, ip): self.pg_hba_conf.delete_client(ip) @@ -244,10 +244,10 @@ def _init_service(self, mpoint, password): self.service.stop() self.root_user = self.create_linux_user(ROOT_USER, password) - self.master_user = self.create_linux_user(MASTER_USER, password) + self.main_user = self.create_linux_user(MASTER_USER, password) self.first_start = move_files = not self.cluster_dir.is_initialized(mpoint) - LOG.debug("Master node is being initialized for the first time: %s" % self.first_start) + LOG.debug("Main node is being initialized for the first time: %s" % self.first_start) self.postgresql_conf.data_directory = self.cluster_dir.move_to(mpoint, move_files) self.postgresql_conf.listen_addresses = '*' self.postgresql_conf.wal_level = 'hot_standby' @@ -326,17 +326,17 @@ def _get_root_user(self): def _set_root_user(self, user): self._set('root_user', user) - def _get_master_user(self): - key = 'master' + def _get_main_user(self): + key = 'main' if not self._objects.has_key(key): self._objects[key] = PgUser(MASTER_USER, self.pg_keys_dir) return self._objects[key] - def _set_master_user(self, user): - self._set('master', user) + def _set_main_user(self, user): + self._set('main', user) root_user = property(_get_root_user, _set_root_user) - master_user = property(_get_master_user, _set_master_user) + main_user = property(_get_main_user, _set_main_user) config_dir = property(_get_config_dir, _set_config_dir) cluster_dir = property(_get_cluster_dir, _set_cluster_dir) postgresql_conf = property(_get_postgresql_conf, _set_postgresql_conf) @@ -668,7 +668,7 @@ def move_to(self, dst, move_files=True): return new_cluster_dir def 
clean(self): - fnames = ('recovery.conf','recovery.done','postmaster.pid') + fnames = ('recovery.conf','recovery.done','postmain.pid') for fname in fnames: exclude = os.path.join(self.path, fname) if os.path.exists(exclude): @@ -757,7 +757,7 @@ def move_to(self, dst): LOG.debug("configuring pid") conf = PostgresqlConf.find(self) if not centos7: - conf.pid_file = os.path.join(dst, 'postmaster.pid') # [SCALARIZR-1685] + conf.pid_file = os.path.join(dst, 'postmain.pid') # [SCALARIZR-1685] def _patch_sysconfig(self, config_dir): @@ -1188,7 +1188,7 @@ class ParseError(BaseException): def make_symlinks(source_dir, dst_dir, username='postgres'): #Vital hack to get init script to work on CentOS 5x/6x - for obj in ['base', 'PG_VERSION', 'postmaster.pid']: + for obj in ['base', 'PG_VERSION', 'postmain.pid']: src = os.path.join(source_dir, obj) dst = os.path.join(dst_dir, obj) diff --git a/src/scalarizr/services/rabbitmq.py b/src/scalarizr/services/rabbitmq.py index 953d0bc..b2a6f36 100755 --- a/src/scalarizr/services/rabbitmq.py +++ b/src/scalarizr/services/rabbitmq.py @@ -153,8 +153,8 @@ def _check_admin_user(self, username, password): def check_scalr_user(self, password): self._check_admin_user(SCALR_USERNAME, password) - def check_master_user(self, password): - self._check_admin_user('scalr_master', password) + def check_main_user(self, password): + self._check_admin_user('scalr_main', password) def add_user(self, username, password, is_admin=False): system2((RABBITMQCTL, 'add_user', username, password), logger=self._logger) diff --git a/src/scalarizr/services/redis.py b/src/scalarizr/services/redis.py index 55d1dee..b39e1e4 100755 --- a/src/scalarizr/services/redis.py +++ b/src/scalarizr/services/redis.py @@ -342,22 +342,22 @@ def save_all(self): if redis.service.running: redis.redis_cli.save() - def init_as_masters(self, mpoint): + def init_as_mains(self, mpoint): passwords = [] ports = [] for redis in self.instances: - redis.init_master(mpoint) + 
redis.init_main(mpoint) passwords.append(redis.password) ports.append(redis.port) return ports, passwords - def init_as_slaves(self, mpoint, primary_ip): + def init_as_subordinates(self, mpoint, primary_ip): passwords = [] ports = [] for redis in self.instances: passwords.append(redis.password) ports.append(redis.port) - redis.init_slave(mpoint, primary_ip, redis.port) + redis.init_subordinate(mpoint, primary_ip, redis.port) return ports, passwords def wait_for_sync(self, link_timeout=None, sync_timeout=None): @@ -377,35 +377,35 @@ def __init__(self, port=__redis__['defaults']['port'], password=None): self.port = port self.password = password - def init_master(self, mpoint): - self.service.stop('Configuring master. Moving Redis db files') + def init_main(self, mpoint): + self.service.stop('Configuring main. Moving Redis db files') self.init_service(mpoint) - self.redis_conf.masterauth = None - self.redis_conf.slaveof = None + self.redis_conf.mainauth = None + self.redis_conf.subordinateof = None self.service.start() return self.current_password - def init_slave(self, mpoint, primary_ip, primary_port): - self.service.stop('Configuring slave') + def init_subordinate(self, mpoint, primary_ip, primary_port): + self.service.stop('Configuring subordinate') self.init_service(mpoint) self.change_primary(primary_ip, primary_port) self.service.start() return self.current_password def wait_for_sync(self,link_timeout=None,sync_timeout=None): - LOG.info('Waiting for link with master') - wait_until(lambda: self.redis_cli.master_link_status == 'up', sleep=3, timeout=link_timeout) - LOG.info('Waiting for sync with master to complete') - wait_until(lambda: not self.redis_cli.master_sync_in_progress, sleep=10, timeout=sync_timeout) - LOG.info('Sync with master completed') + LOG.info('Waiting for link with main') + wait_until(lambda: self.redis_cli.main_link_status == 'up', sleep=3, timeout=link_timeout) + LOG.info('Waiting for sync with main to complete') + wait_until(lambda: not 
self.redis_cli.main_sync_in_progress, sleep=10, timeout=sync_timeout) + LOG.info('Sync with main completed') def change_primary(self, primary_ip, primary_port): """ - Currently redis slaves cannot use existing data to catch up with master + Currently redis subordinates cannot use existing data to catch up with main Instead they create another db file while performing full sync Wchich may potentially cause free space problem on redis storage And broke whole initializing process. - So scalarizr removes all existing data on initializing slave + So scalarizr removes all existing data on initializing subordinate to free as much storage space as possible. """ aof_fname = self.redis_conf.appendfilename @@ -416,8 +416,8 @@ def change_primary(self, primary_ip, primary_port): os.remove(path) LOG.info("Old db file removed: %s" % path) - self.redis_conf.masterauth = self.password - self.redis_conf.slaveof = (primary_ip, primary_port) + self.redis_conf.mainauth = self.password + self.redis_conf.subordinateof = (primary_ip, primary_port) def init_service(self, mpoint): if not os.path.exists(mpoint): @@ -585,23 +585,23 @@ def _set_bind(self, list_ips): self.set_sequential_option('bind', list_ips) - def _get_slaveof(self): - return self.get_sequential_option('slaveof') + def _get_subordinateof(self): + return self.get_sequential_option('slaveof') - def _set_slaveof(self, conn_data): + def _set_subordinateof(self, conn_data): ''' @tuple conndata: (ip,) or (ip, port) ''' - self.set_sequential_option('slaveof', conn_data) + self.set_sequential_option('slaveof', conn_data) - def _get_masterauth(self): - return self.get('masterauth') + def _get_mainauth(self): + return self.get('masterauth') - def _set_masterauth(self, passwd): - self.set('masterauth', passwd) + def _set_mainauth(self, passwd): + self.set('masterauth', passwd) def _get_requirepass(self): @@ -693,8 +693,8 @@ def _set_appendfsync(self, value): dir = property(_get_dir, _set_dir) save = property(_get_save, 
_set_save) bind = property(_get_bind, _set_bind) - slaveof = property(_get_slaveof, _set_slaveof) - masterauth = property(_get_masterauth, _set_masterauth) + subordinateof = property(_get_subordinateof, _set_subordinateof) + mainauth = property(_get_mainauth, _set_mainauth) requirepass = property(_get_requirepass, _set_requirepass) appendonly = property(_get_appendonly, _set_appendonly) dbfilename = property(_get_dbfilename, _set_dbfilename) @@ -806,8 +806,8 @@ def changes_since_last_save(self): @property - def connected_slaves(self): - return int(self.info['connected_slaves']) + def connected_subordinates(self): + return int(self.info['connected_slaves']) @property @@ -826,26 +826,26 @@ def role(self): @property - def master_host(self): + def main_host(self): info = self.info - if info['role']=='slave': - return info['master_host'] + if info['role']=='slave': + return info['master_host'] return None @property - def master_port(self): + def main_port(self): info = self.info - if info['role']=='slave': - return int(info['master_port']) + if info['role']=='slave': + return int(info['master_port']) return None @property - def master_link_status(self): + def main_link_status(self): info = self.info - if info['role'] == 'slave': - return info['master_link_status'] + if info['role'] == 'slave': + return info['master_link_status'] return None @@ -869,18 +869,18 @@ def save(self): @property - def master_last_io_seconds_ago(self): + def main_last_io_seconds_ago(self): info = self.info - if info['role'] == 'slave': - return int(info['master_last_io_seconds_ago']) + if info['role'] == 'slave': + return int(info['master_last_io_seconds_ago']) return None @property - def master_sync_in_progress(self): + def main_sync_in_progress(self): info = self.info - if info['role'] == 'slave': - return True if info['master_sync_in_progress']=='1' else False + if info['role'] == 'slave': + return True if info['master_sync_in_progress']=='1' else False return False 
diff --git a/src/scalarizr/storage2/volumes/eph.py b/src/scalarizr/storage2/volumes/eph.py index 460caa0..3756400 100755 --- a/src/scalarizr/storage2/volumes/eph.py +++ b/src/scalarizr/storage2/volumes/eph.py @@ -55,7 +55,7 @@ def __init__(self, vg=None, disk=None, disks=None, def _ensure(self): # snap should be applied after layout: download and extract data. # this could be done on already ensured volume. - # Example: resync slave data + # Example: resync subordinate data if not self._lvm_volume: # First of all, merge self config and snapshot config diff --git a/src/scalarizr/updclient/pkgmgr.py b/src/scalarizr/updclient/pkgmgr.py index e513efc..4ab3ac3 100755 --- a/src/scalarizr/updclient/pkgmgr.py +++ b/src/scalarizr/updclient/pkgmgr.py @@ -452,7 +452,7 @@ def _set_repo_string(self, repo_string): self.plain = True elif len(parts) == 3: # APT-pool repo - # "http://stridercd.scalr-labs.com/apt/develop master main" + # "http://stridercd.scalr-labs.com/apt/develop main main" self.plain = False self.component = parts[2] else: diff --git a/src/scalarizr/util/sqlite_server.py b/src/scalarizr/util/sqlite_server.py index 8c59088..4cb25e0 100755 --- a/src/scalarizr/util/sqlite_server.py +++ b/src/scalarizr/util/sqlite_server.py @@ -141,8 +141,8 @@ def _set_text_factory(self, f): class SqliteServer(object): def __init__(self, conn_creator): - self._master_conn = conn_creator() - self._master_conn.isolation_level = None + self._main_conn = conn_creator() + self._main_conn.isolation_level = None self._single_conn_proxy = None self._clients = WeakValueDictionary() self._cursors = {} @@ -187,7 +187,7 @@ def serve_forever(self): def _cursor_create(self, hash, proxy): """ - self._cursors[hash] = self._master_conn.cursor() + self._cursors[hash] = self._main_conn.cursor() return self._cursors[hash] """ #LOG.debug('create cursor %s', hash) @@ -208,7 +208,7 @@ def _cursor_delete(self, hash): def _cursor_execute(self, hash, *args, **kwds): - cur = self._master_conn.cursor() + cur = 
self._main_conn.cursor() try: cur.execute(*args, **kwds) return { @@ -241,23 +241,23 @@ def _cursor_rowcount(self, hash): def _conn_set_row_factory(self, hash, f): - self._master_conn.row_factory = f + self._main_conn.row_factory = f def _conn_set_text_factory(self, hash, f): - self._master_conn.text_factory = f + self._main_conn.text_factory = f def _conn_get_row_factory(self, hash): - return self._master_conn.row_factory + return self._main_conn.row_factory def _conn_get_text_factory(self, hash): - return self._master_conn.text_factory + return self._main_conn.text_factory def _conn_executescript(self, hash, sql): - return self._master_conn.executescript(sql) + return self._main_conn.executescript(sql) def _conn_execute(self, hash, *args, **kwds): diff --git a/src/scalarizr/util/szradm.py b/src/scalarizr/util/szradm.py index 3318dcf..9fdea48 100755 --- a/src/scalarizr/util/szradm.py +++ b/src/scalarizr/util/szradm.py @@ -262,7 +262,7 @@ class ListRolesCommand(Command): method = "list_roles" group = "QueryEnv" fields = ['behaviour','name', 'farm-role-id', 'index', 'internal-ip', - 'external-ip', 'replication-master'] + 'external-ip', 'replication-main'] parser = OptionParser(usage='list-roles [-b --behaviour] ' '[-r --role] [--with-initializing]', description='Display roles list', formatter= IndHelpFormatter()) @@ -278,7 +278,7 @@ def iter_result(self, result): for host in d.hosts: yield [behaviour, d.name, d.farm_role_id, str(host.index), host.internal_ip, host.external_ip, - str(host.replication_master)] + str(host.replication_main)] class GetHttpsCertificateCommand(Command): diff --git a/tests/acceptance/haproxy.py b/tests/acceptance/haproxy.py index a1437cd..c97760a 100644 --- a/tests/acceptance/haproxy.py +++ b/tests/acceptance/haproxy.py @@ -391,9 +391,9 @@ def server_is_removed_from_the_backend(step): assert not server_name in world.api.cfg.backends["scalr:backend:tcp:27000"]['server'] -@step("i have a proxy to two roles: master and backup") +@step("i have 
a proxy to two roles: main and backup") def i_have_a_proxy_to_two_roles(step): - step.given("i have a role master") + step.given("i have a role main") step.given("i have a backup role backup") step.given("i add proxy") @@ -405,11 +405,11 @@ def i_have_a_proxy_to_two_servers(step): step.given("i add proxy") -@step("i terminate master servers") -def i_terminate_master_servers(step): - master_role = world.roles["master"] +@step("i terminate main servers") +def i_terminate_main_servers(step): + main_role = world.roles["main"] - for server in master_role.servers: + for server in main_role.servers: world.api.remove_server({ 'port': server.address[1], 'host': server.address[0], diff --git a/tests/acceptance/nginx.py b/tests/acceptance/nginx.py index b6e9582..8ee1df8 100644 --- a/tests/acceptance/nginx.py +++ b/tests/acceptance/nginx.py @@ -419,8 +419,8 @@ def http_error_302(self, req, fp, code, msg, headers): # Scenario 7 -@step(u'Given I have a proxy to two roles: master and backup') -def given_i_have_a_proxy_to_two_roles_master_and_backup(step): +@step(u'Given I have a proxy to two roles: main and backup') +def given_i_have_a_proxy_to_two_roles_main_and_backup(step): server1_port = 8001 server2_port = 8002 server3_port = 8003 @@ -456,8 +456,8 @@ def get_role_servers(role): assert world.expected_response3 not in responses -@step(u'When I terminate master servers') -def when_i_terminate_master_servers(step): +@step(u'When I terminate main servers') +def when_i_terminate_main_servers(step): world.server1.go_down() world.server2.go_down() diff --git a/tests/unit/scalarizrtests/handlers/test_cassandra.py b/tests/unit/scalarizrtests/handlers/test_cassandra.py index 9c2e6f7..2adfbfb 100644 --- a/tests/unit/scalarizrtests/handlers/test_cassandra.py +++ b/tests/unit/scalarizrtests/handlers/test_cassandra.py @@ -45,12 +45,12 @@ def list_roles(self, behaviour): return [_Bunch( behaviour = "cassandra", name = "cassandra-node-1", - hosts = 
[_Bunch(index='1',replication_master="1",internal_ip="192.168.1.93",external_ip="8.8.8.8")] + hosts = [_Bunch(index='1',replication_main="1",internal_ip="192.168.1.93",external_ip="8.8.8.8")] ), _Bunch( behaviour = "cassandra", name = "cassandra-node-2", - hosts = [_Bunch(index='2',replication_master="0",internal_ip=None,external_ip="8.8.8.9")] + hosts = [_Bunch(index='2',replication_main="0",internal_ip=None,external_ip="8.8.8.9")] )] class _Message: def __init__(self): diff --git a/tests/unit/scalarizrtests/handlers/test_ip_list_builder.py b/tests/unit/scalarizrtests/handlers/test_ip_list_builder.py index a4e8caf..765de91 100644 --- a/tests/unit/scalarizrtests/handlers/test_ip_list_builder.py +++ b/tests/unit/scalarizrtests/handlers/test_ip_list_builder.py @@ -20,13 +20,13 @@ def list_roles(self, role_name=None, behaviour=None): name = "mysql-lvm", hosts = [_Bunch( index='1', - replication_master=True, + replication_main=True, internal_ip="127.0.0.1", external_ip="192.168.1.92" ), _Bunch( index='2', - replication_master=False, + replication_main=False, internal_ip="127.0.0.2", external_ip="192.168.1.93" ) @@ -47,9 +47,9 @@ def setUp(self): self.ip_lb = ip_list_builder.IpListBuilder() - def test_host_is_replication_master(self): - is_replication_master = self.ip_lb._host_is_replication_master('127.0.0.1', 'mysql-lvm') - self.assertTrue(is_replication_master) + def test_host_is_replication_main(self): + is_replication_main = self.ip_lb._host_is_replication_main('127.0.0.1', 'mysql-lvm') + self.assertTrue(is_replication_main) def _on_HostUpDown(self,internal_ip, prefix): role_alias = 'mysql' @@ -72,11 +72,11 @@ def _on_HostUpDown(self,internal_ip, prefix): self.assertFalse(os.path.exists(mysql_file)) def test_on_HostUpDown1(self): - self._on_HostUpDown(internal_ip = '127.0.0.1', prefix = "-master") + self._on_HostUpDown(internal_ip = '127.0.0.1', prefix = "-main") def test_on_HostUpDown2(self): - self._on_HostUpDown(internal_ip = '127.0.0.2', prefix = "-slave") + 
self._on_HostUpDown(internal_ip = '127.0.0.2', prefix = "-subordinate") if __name__ == "__main__": unittest.main() diff --git a/tests/unit/scalarizrtests/handlers/test_memcached.py b/tests/unit/scalarizrtests/handlers/test_memcached.py index f9ed4ce..15aa182 100644 --- a/tests/unit/scalarizrtests/handlers/test_memcached.py +++ b/tests/unit/scalarizrtests/handlers/test_memcached.py @@ -19,7 +19,7 @@ def list_roles(self, behaviour=None): return [_Bunch( behaviour = "app", name = "nginx", - hosts = [_Bunch(index='1',replication_master="1",internal_ip="8.8.8.8",external_ip="192.168.1.93")] + hosts = [_Bunch(index='1',replication_main="1",internal_ip="8.8.8.8",external_ip="192.168.1.93")] )] diff --git a/tests/unit/scalarizrtests/handlers/test_mysql.py b/tests/unit/scalarizrtests/handlers/test_mysql.py index c3d8e73..93d71f9 100644 --- a/tests/unit/scalarizrtests/handlers/test_mysql.py +++ b/tests/unit/scalarizrtests/handlers/test_mysql.py @@ -34,7 +34,7 @@ def send_message(self, message): self._messages.append(message) def _create_ebs_snapshot(self): pass - def _take_master_volume(self, volume_id, ec2_conn=None): + def _take_main_volume(self, volume_id, ec2_conn=None): return _Volume() LOCAL_IP = '12.34.56.78' @@ -116,7 +116,7 @@ def test_create_snapshot(self): self.assertEqual(log_file, true_log_file) self.assertEqual(log_pos, true_log_pos) file = open('/etc/mysql/farm-replication.cnf') - self.assertEqual('[mysqld]\nserver-id\t\t=\t1\nmaster-connect-retry\t\t=\t15\n', file.read()) + self.assertEqual('[mysqld]\nserver-id\t\t=\t1\nmain-connect-retry\t\t=\t15\n', file.read()) file.close() @@ -141,7 +141,7 @@ def test_on_before_host_up(self): self.assertEqual(datadir, '/mnt/dbstorage/mysql-data/') self.assertEqual(log_bin, '/mnt/dbstorage/mysql-misc/binlog.log') - def test_on_mysql_newmaster_up(self): + def test_on_mysql_newmain_up(self): bus.queryenv_service = _QueryEnv() bus.platform = _Platform() config = bus.config @@ -178,26 +178,26 @@ def 
test_on_mysql_newmaster_up(self): myclient.expect('mysql>') # retrieve log file and position try: - master_status = myclient.before.split('\r\n')[4].split('|') + main_status = myclient.before.split('\r\n')[4].split('|') except: - raise BaseException("Cannot get master status") + raise BaseException("Cannot get main status") finally: myclient.sendline('UNLOCK TABLES;') os.kill(myd.pid, signal.SIGTERM) myd = Popen([daemon, '--defaults-file=/etc/mysql2/my.cnf'], stdin=PIPE, stdout=PIPE, stderr=STDOUT) ping_service(LOCAL_IP, 3306, 5) - message.log_file = master_status[1].strip() - message.log_pos = master_status[2].strip() + message.log_file = main_status[1].strip() + message.log_pos = main_status[2].strip() message.repl_user = mysql.REPL_USER message.repl_password = repl_password message.root_password = root_pass - handler.on_Mysql_NewMasterUp(message) + handler.on_Mysql_NewMainUp(message) os.kill(myd.pid, signal.SIGTERM) initd.stop("mysql") system ('rm -rf /var/lib/mysql && cp -pr /var/lib/backmysql /var/lib/mysql && rm -rf /var/lib/backmysql') config.set(sect_name, mysql.OPT_REPLICATION_MASTER, '1') - def test_on_before_host_up_slave_ebs(self): + def test_on_before_host_up_subordinate_ebs(self): bus.queryenv_service = _QueryEnv() bus.platform = _Platform() message = _Message() @@ -212,7 +212,7 @@ def test_on_before_host_up_slave_ebs(self): self.assertEqual(datadir, '/mnt/dbstorage/mysql-data/') self.assertEqual(log_bin, '/mnt/dbstorage/mysql-misc/binlog.log') - def test_on_before_host_up_slave_eph(self): + def test_on_before_host_up_subordinate_eph(self): bus.queryenv_service = _QueryEnv() bus.platform = _Platform() message = _Message() @@ -227,7 +227,7 @@ def test_on_before_host_up_slave_eph(self): self.assertEqual(datadir, '/mnt/dbstorage/mysql-data/') self.assertEqual(log_bin, '/mnt/dbstorage/mysql-misc/binlog.log') - def test_on_Mysql_PromoteToMaster(self): + def test_on_Mysql_PromoteToMain(self): bus.queryenv_service = _QueryEnv() bus.platform = _Platform() 
config = bus.config @@ -238,7 +238,7 @@ def test_on_Mysql_PromoteToMaster(self): message.repl_password = '456' message.stat_password = '789' handler = _MysqlHandler() - handler.on_Mysql_PromoteToMaster(message) + handler.on_Mysql_PromoteToMain(message) def mysql_password(str): pass1 = hashlib.sha1(str).digest() @@ -268,7 +268,7 @@ def __init__(self): def list_role_params(self, role_name): return _Bunch( mysql_data_storage_engine = self.storage, - mysql_master_ebs_volume_id = 'test-id', + mysql_main_ebs_volume_id = 'test-id', ebs_snap_id = 'test_snap_id' ) @@ -290,7 +290,7 @@ def __init__(self): self.mysql_stat_password = None self.mysql_stat_user = None self.local_ip = LOCAL_IP - self.mysql_replication_master = 1 + self.mysql_replication_main = 1 if __name__ == "__main__": init_tests() diff --git a/tests/unit/scalarizrtests/handlers/test_mysql2.py b/tests/unit/scalarizrtests/handlers/test_mysql2.py index 87840bf..8abee50 100644 --- a/tests/unit/scalarizrtests/handlers/test_mysql2.py +++ b/tests/unit/scalarizrtests/handlers/test_mysql2.py @@ -4,11 +4,11 @@ import sys -eph_host_init_response_new_master = { +eph_host_init_response_new_main = { 'server_index': '1', 'db_type': 'percona', 'percona': { - 'replication_master': 1, + 'replication_main': 1, 'volume_config': { 'type': 'eph', }, @@ -21,11 +21,11 @@ } } -eph_host_init_response_respawn_master = { +eph_host_init_response_respawn_main = { 'server_index': '1', 'db_type': 'percona', 'percona': { - 'replication_master': 1, + 'replication_main': 1, 'volume_config': { 'type': 'eph', 'mpoint': '/mnt/dbstorage', @@ -43,12 +43,12 @@ } } -eph_host_init_response_slave = { +eph_host_init_response_subordinate = { 'local_ip': '10.146.34.58', 'remote_ip': '176.34.6.168', 'db_type': 'percona', 'percona': { - 'replication_master': 1, + 'replication_main': 1, 'volume_config': { 'type': 'eph', 'id': 'eph-vol-2a3bd1c8' @@ -60,13 +60,13 @@ } } -eph_new_master_up = { +eph_new_main_up = { 'behaviour': ['percona'], 'local_ip': 
'10.146.34.58', 'remote_ip': '176.34.6.168', 'db_type': 'percona', 'percona': { - 'replication_master': '1', + 'replication_main': '1', 'snapshot_config': {'type': 'eph'}, 'root_password': 'zcuDiVum9hDvx1v97Ac5', 'repl_password': 'cumLityXgnJv5JgaxmXA', @@ -84,7 +84,7 @@ def __init__(self, *args, **kwds): behavior=['percona']) def __setitem__(self, key, value): - if key == 'replication_master': + if key == 'replication_main': value = int(key) super(NodeMock, self).__setitem__(key, value) @@ -104,19 +104,19 @@ def __setitem__(self, key, value): class TestMysqlHandler(object): - def test_master_new(self, **kwds): + def test_main_new(self, **kwds): snapshot = mock.MagicMock( - name='master storage snapshot', + name='main storage snapshot', type='eph', id='eph-snap-12345678') restore = mock.Mock( - name='master restore', + name='main restore', type='snap_mysql', snapshot=snapshot, log_file='binlog.000003', log_pos='107') backup = mock.Mock( - name='master backup', + name='main backup', **{'run.return_value': restore}) kwds['backup'].configure_mock(return_value=backup) @@ -125,7 +125,7 @@ def test_master_new(self, **kwds): hdlr = mysql2.MysqlHandler() mock.patch.object(hdlr, '_storage_valid', return_value=False) - hir = mock.Mock(**eph_host_init_response_new_master) + hir = mock.Mock(**eph_host_init_response_new_main) host_up = mock.Mock() hdlr.on_host_init_response(hir) hdlr.on_before_host_up(host_up) @@ -147,7 +147,7 @@ def test_master_new(self, **kwds): assert host_up.db_type == 'percona' assert host_up.percona['log_file'] == restore.log_file assert host_up.percona['log_pos'] == restore.log_pos - assert int(host_up.percona['replication_master']) == 1 + assert int(host_up.percona['replication_main']) == 1 assert host_up.percona['root_password'] == __mysql__['root_password'] assert host_up.percona['repl_password'] == __mysql__['repl_password'] assert host_up.percona['stat_password'] == __mysql__['stat_password'] @@ -155,18 +155,18 @@ def test_master_new(self, **kwds): 
assert 'snapshot_config' in host_up.percona - def test_master_respawn(self, **kwds): + def test_main_respawn(self, **kwds): return from scalarizr.handlers import mysql2 hdlr = mysql2.MysqlHandler() mock.patch.object(hdlr, '_storage_valid', return_value=True) - hir = mock.Mock(**eph_host_init_response_respawn_master) + hir = mock.Mock(**eph_host_init_response_respawn_main) hdlr.on_host_init_response(hir) __mysql__ = mysql2.__mysql__ - assert (__mysql__['replication_master']) == 1 + assert (__mysql__['replication_main']) == 1 assert __mysql__['restore'] kwds['restore'].assert_called_with( type='snap_mysql', @@ -181,7 +181,7 @@ def test_master_respawn(self, **kwds): assert host_up.db_type == 'percona' assert host_up.percona['log_file'] assert host_up.percona['log_pos'] - assert int(host_up.percona['replication_master']) == 1 + assert int(host_up.percona['replication_main']) == 1 assert host_up.percona['root_password'] == __mysql__['root_password'] assert host_up.percona['repl_password'] == __mysql__['repl_password'] assert host_up.percona['stat_password'] == __mysql__['stat_password'] @@ -189,11 +189,11 @@ def test_master_respawn(self, **kwds): assert 'snapshot_config' in host_up.percona - def test_master_respawn_from_snapshot(self, **kwds): + def test_main_respawn_from_snapshot(self, **kwds): pass - def test_slave(self, **kwds): + def test_subordinate(self, **kwds): pass @@ -205,19 +205,19 @@ def test_create_backup(self, **kwds): pass - def test_slave_to_master(self, **kwds): + def test_subordinate_to_main(self, **kwds): pass - def test_new_master_up(self, **kwds): + def test_new_main_up(self, **kwds): pass class TestMysqlHandlerXtrabackup(object): - def test_master_new(self): + def test_main_new(self): pass - def test_master_respawn(self): + def test_main_respawn(self): pass diff --git a/tests/unit/scalarizrtests/handlers/test_nginx.py b/tests/unit/scalarizrtests/handlers/test_nginx.py index 02850af..d51238b 100644 --- 
a/tests/unit/scalarizrtests/handlers/test_nginx.py +++ b/tests/unit/scalarizrtests/handlers/test_nginx.py @@ -170,7 +170,7 @@ def _test_on_BeforeHostTerminate(self): role_host = RoleHost( index='1', - replication_master="1", + replication_main="1", internal_ip="8.8.8.8", external_ip="192.168.1.93") vhost = VirtualHost( diff --git a/tests/unit/scalarizrtests/services/test_mysql2.py b/tests/unit/scalarizrtests/services/test_mysql2.py index a774b57..08d5ffa 100644 --- a/tests/unit/scalarizrtests/services/test_mysql2.py +++ b/tests/unit/scalarizrtests/services/test_mysql2.py @@ -29,7 +29,7 @@ class TestMySQLSnapBackupAndRestore(object): def setup(self): self.bak = backup.backup(type='snap_mysql') mock.patch.object(self.bak, '_client').start() - self.bak._client.return_value.master_status.return_value = ('binlog.000003', '107') + self.bak._client.return_value.main_status.return_value = ('binlog.000003', '107') self.rst = backup.restore(type='snap_mysql') @@ -51,7 +51,7 @@ def test_freeze(self, *args, **kwds): self.bak.freeze(mock.Mock(), state) self.bak._client.return_value.lock_tables.assert_called_with() - self.bak._client.return_value.master_status.assert_called_with() + self.bak._client.return_value.main_status.assert_called_with() assert state == {'log_file': 'binlog.000003', 'log_pos': '107'} diff --git a/tests/unit/scalarizrtests/test_node.py b/tests/unit/scalarizrtests/test_node.py index 3eb8028..5603491 100644 --- a/tests/unit/scalarizrtests/test_node.py +++ b/tests/unit/scalarizrtests/test_node.py @@ -13,9 +13,9 @@ def test_get_plain_key(self): store = mock.MagicMock(spec=node.Store) store.__getitem__.return_value = 'aaa' - master = node.Compound({'plain_key': store}) + main = node.Compound({'plain_key': store}) - assert master['plain_key'] == 'aaa' + assert main['plain_key'] == 'aaa' store.__getitem__.assert_called_with('plain_key') @@ -27,24 +27,24 @@ def test_get_enum_key(self): store = mock.MagicMock(spec=node.Store) store.__getitem__.side_effect = lambda 
key: values[key] - master = node.Compound({'server_id,platform': store}) + main = node.Compound({'server_id,platform': store}) - eq_(master['server_id'], '14593') - eq_(master['platform'], 'ec2') + eq_(main['server_id'], '14593') + eq_(main['platform'], 'ec2') def test_set_enum_key(self): store = mock.MagicMock(spec=node.Store) - master = node.Compound({'mike,andru,luka': store}) + main = node.Compound({'mike,andru,luka': store}) - master['mike'] = 'story' + main['mike'] = 'story' store.__setitem__.assert_called_with('mike', 'story') def test_set_undefined_key(self): - master = node.Compound() + main = node.Compound() - master['key1'] = 'ooo' - assert master['key1'] == 'ooo' + main['key1'] = 'ooo' + assert main['key1'] == 'ooo' def test_update(self): @@ -54,14 +54,14 @@ def test_update(self): 'sub_1,sub_2': sub }) mysql.update({ - 'replication_master': '1', + 'replication_main': '1', 'sub_1': 'a value', 'sub_2': 'not bad' }) - assert 'replication_master' in mysql - assert mysql['replication_master'] == '1' + assert 'replication_main' in mysql + assert mysql['replication_main'] == '1' sub.__setitem__.call_args_list[0] = mock.call(mysql, 'sub_1', 'a value') sub.__setitem__.call_args_list[1] = mock.call(mysql, 'sub_2', 'not bad') diff --git a/tests/unit/scalarizrtests/test_queryenv.py b/tests/unit/scalarizrtests/test_queryenv.py index 3df749b..c64fbba 100644 --- a/tests/unit/scalarizrtests/test_queryenv.py +++ b/tests/unit/scalarizrtests/test_queryenv.py @@ -104,7 +104,7 @@ def test_list_roles(self): self.assertFalse(hosts is None) self.assertEqual(host.internal_ip, "211.31.14.198") self.assertEqual(host.external_ip, "211.31.14.198") - self.assertTrue(host.replication_master) + self.assertTrue(host.replication_main) self.assertEqual(host.index, 1) diff --git a/tests/unit/scalarizrtests/util/test_sqlite_server.py b/tests/unit/scalarizrtests/util/test_sqlite_server.py index 2c9a94c..e8238dd 100644 --- a/tests/unit/scalarizrtests/util/test_sqlite_server.py +++ 
b/tests/unit/scalarizrtests/util/test_sqlite_server.py @@ -122,7 +122,7 @@ def test_operate_closed_cursor(self): cur = CONN.cursor() cur.close() # XXX: This wasn't works. method silently dies by timeout - assert_raises(Exception, cur.execute, 'SELECT * FROM sqlite_master') + assert_raises(Exception, cur.execute, 'SELECT * FROM sqlite_master') '''