#!/usr/bin/env python
# Copyright (c) 2020 Klustron inc. All rights reserved.
# This source code is licensed under Apache 2.0 License,
# combined with Common Clause Condition 1.0, as detailed in the NOTICE file.

import sys
import json
import getpass
import platform
import argparse
from cluster_common import *

def generate_storage_service(config, machines, commandslist, node, idx, filesmap):
    """Write a systemd unit file for one kunlun-storage node and queue the
    commands that install and enable it on the node's machine.

    config: global config dict (reads 'product_version').
    machines: ip -> machine info map (reads 'user' and 'basedir').
    commandslist: remote command list, appended to in place.
    node: storage node dict (reads 'ip' and 'port').
    idx: unique index keeping staged file names distinct per node.
    filesmap: per-node file transfer map, appended to in place.
    """
    mach = machines.get(node['ip'])
    storagedir = "kunlun-storage-%s" % config['product_version']
    servname = "kunlun-storage-%d" % node['port']
    fname_to = "%s.service" % servname
    # Local staged copy carries idx so units for different nodes on the
    # same port never collide inside install/.
    fname = "%d-%s" % (idx, fname_to)
    unit_lines = [
        "# kunlun-storage-%d systemd service file\n" % node['port'],
        "\n",
        "[Unit]\n",
        "Description=kunlun-storage-%d\n" % node['port'],
        "After=network.target\n",
        "\n",
        "[Install]\n",
        "WantedBy=multi-user.target\n",
        "\n",
        "[Service]\n",
        "Type=forking\n",
        "User=%s\n" % mach['user'],
        "Restart=on-failure\n",
        "WorkingDirectory=%s/%s/dba_tools\n" % (mach['basedir'], storagedir),
        "ExecStart=/bin/bash startmysql.sh %d\n" % node['port'],
        "ExecStop=/bin/bash stopmysql.sh %d\n" % node['port'],
    ]
    with open('install/%s' % fname, 'w') as servicef:
        servicef.writelines(unit_lines)
    addNodeToFilesMap(filesmap, node, fname, './%s' % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)

def generate_server_service(config, machines, commandslist, node, idx, filesmap):
    """Write a systemd unit file for one kunlun-server (computing) node and
    queue the commands that install and enable it on the node's machine.

    config: global config dict (reads 'product_version').
    machines: ip -> machine info map (reads 'user' and 'basedir').
    commandslist: remote command list, appended to in place.
    node: computing node dict (reads 'ip' and 'port').
    idx: unique index keeping staged file names distinct per node.
    filesmap: per-node file transfer map, appended to in place.
    """
    mach = machines.get(node['ip'])
    serverdir = "kunlun-server-%s" % config['product_version']
    port = node['port']
    servname = "kunlun-server-%d" % port
    fname_to = "%s.service" % servname
    # Staged copy is prefixed with idx so files for different nodes never
    # collide inside install/.
    fname = "%d-%s" % (idx, fname_to)
    unit_lines = [
        "# kunlun-server-%d systemd service file\n" % port,
        "\n",
        "[Unit]\n",
        "Description=kunlun-server-%d\n" % port,
        "After=network.target\n",
        "\n",
        "[Install]\n",
        "WantedBy=multi-user.target\n",
        "\n",
        "[Service]\n",
        "Type=forking\n",
        "User=%s\n" % mach['user'],
        "Restart=on-failure\n",
        "WorkingDirectory=%s/%s/scripts\n" % (mach['basedir'], serverdir),
        "ExecStart=/usr/bin/python2 start_pg.py --port=%d\n" % port,
    ]
    with open('install/%s' % fname, 'w') as servicef:
        servicef.writelines(unit_lines)
    addNodeToFilesMap(filesmap, node, fname, './%s' % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)

def generate_clustermgr_service(config, machines, commandslist, node, idx, filesmap):
    """Write a systemd unit file for one kunlun-cluster-manager node and
    queue the commands that install and enable it on the node's machine.

    config: global config dict (reads 'product_version').
    machines: ip -> machine info map (reads 'user' and 'basedir').
    commandslist: remote command list, appended to in place.
    node: cluster_mgr node dict (reads 'ip' and 'brpc_raft_port').
    idx: unique index keeping staged file names distinct per node.
    filesmap: per-node file transfer map, appended to in place.
    """
    mach = machines.get(node['ip'])
    clustermgrdir = "kunlun-cluster-manager-%s" % config['product_version']
    # cluster_mgr instances are identified by their raft port, not a db port.
    raftport = node['brpc_raft_port']
    servname = "kunlun-cluster-manager-%d" % raftport
    fname_to = "%s.service" % servname
    fname = "%d-%s" % (idx, fname_to)
    unit_lines = [
        "# kunlun-cluster-manager-%d systemd service file\n" % raftport,
        "\n",
        "[Unit]\n",
        "Description=kunlun-cluster-manager-%d\n" % raftport,
        "After=network.target\n",
        "\n",
        "[Install]\n",
        "WantedBy=multi-user.target\n",
        "\n",
        "[Service]\n",
        "Type=forking\n",
        "User=%s\n" % mach['user'],
        "Restart=on-failure\n",
        "WorkingDirectory=%s/%s/bin\n" % (mach['basedir'], clustermgrdir),
        "ExecStart=/bin/bash start_cluster_mgr.sh\n",
        "ExecStop=/bin/bash stop_cluster_mgr.sh\n",
    ]
    with open('install/%s' % fname, 'w') as servicef:
        servicef.writelines(unit_lines)
    addNodeToFilesMap(filesmap, node, fname, './%s' % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)

def generate_haproxy_service(config, machines, commandslist, node, filesmap):
    """Write a systemd unit file for the haproxy front-end and queue the
    commands that install and enable it on the haproxy machine.

    config: global config dict (unused here, kept for a uniform signature).
    machines: ip -> machine info map (reads 'user' and 'basedir').
    commandslist: remote command list, appended to in place.
    node: haproxy node dict (reads 'ip' and 'port').
    filesmap: per-node file transfer map, appended to in place.
    """
    mach = machines.get(node['ip'])
    port = node['port']
    servname = "kunlun-haproxy-%d" % port
    # Only one haproxy per deployment, so no idx prefix is needed and the
    # staged file already carries its final name.
    fname = "%s.service" % servname
    unit_lines = [
        "# Kunlun-HAProxy-%d systemd service file\n" % port,
        "\n",
        "[Unit]\n",
        "Description=Kunlun-HAProxy-%d\n" % port,
        "After=network.target\n",
        "\n",
        "[Install]\n",
        "WantedBy=multi-user.target\n",
        "\n",
        "[Service]\n",
        "Type=forking\n",
        "User=%s\n" % mach['user'],
        "Restart=on-failure\n",
        "WorkingDirectory=%s\n" % (mach['basedir'],),
        "ExecStart=%s/haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg\n" % (mach['basedir'],),
    ]
    with open('install/%s' % fname, 'w') as servicef:
        servicef.writelines(unit_lines)
    addNodeToFilesMap(filesmap, node, fname, '.')
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)

def generate_install_scripts(jscfg, args):
    """Generate everything needed to install a cluster under install/:
    per-node json configs, optional systemd unit files, and the final
    install/commands.sh driver script.

    The command order encoded in commandslist is: meta shard -> data
    shards (primaries before secondaries) -> computing nodes ->
    bootstrap/registration on the first computing node -> cluster_mgr ->
    optional haproxy -> optional initialization SQL.

    jscfg: parsed cluster configuration; mutated in place (group uuids).
    args: command line arguments, forwarded to the global config helpers.
    """
    init_global_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines)
    validate_and_set_config1(jscfg, machines)
    config = jscfg['config']

    storagedir = "kunlun-storage-%s" % config['product_version']
    serverdir = "kunlun-server-%s" % config['product_version']
    clustermgrdir = "kunlun-cluster-manager-%s" % config['product_version']
    iscloud = config['cloud']
    autostart = config['autostart']
    usesudo = config['sudo']

    valgrindopt = ""
    if config['valgrind']:
        valgrindopt = "--valgrind"

    filesmap = {}
    commandslist = []
    dirmap = {}

    cluster = jscfg['cluster']
    cluster_name = cluster['name']
    meta = cluster['meta']
    datas = cluster['data']

    if not 'group_uuid' in meta:
        # fix: this line was tab-indented (a TabError under python3).
        meta['group_uuid'] = getuuid()
    meta_extraopt = " --ha_mode=%s" % meta['ha_mode']

    my_metaname = 'mysql_meta.json'
    metaf = open(r'install/%s' % my_metaname,'w')
    json.dump(meta, metaf, indent=4)
    metaf.close()

    cfgpat_st =  "bash %s/change_config.sh %s '%s' '%s'"
    cmdpat_base = 'python2 install-mysql.py --config=./%s --target_node_index=%d --cluster_id=%s --shard_id=%s --server_id=%d'
    template_file = "./template-meta.cnf"
    if config['small']:
        template_file = "./template-meta-small.cnf"
    # commands like:
    # python2 install-mysql.py --config=./mysql_meta.json --target_node_index=0 --server_id=[int]
    targetdir='%s/dba_tools' % storagedir
    i=0
    storageidx = 0
    mpries = []
    msecs = []
    shard_id = "meta"
    meta_addrs = []
    for node in meta['nodes']:
        mach = machines.get(node['ip'])
        configmap = get_meta_config(node, meta)
        meta_addrs.append("%s:%s" % (node['ip'], str(node['port'])))
        addNodeToFilesMap(filesmap, node, my_metaname, targetdir)
        if not meta['enable_rocksdb']:
            addToCommandsList(commandslist, node['ip'], targetdir, 'sed -i /rocksdb/d %s' % template_file)
        # Each node gets its own copy of the template so per-node config
        # edits never leak into other nodes on the same machine.
        cnf_file = "template_%d.cnf" % node['port']
        cmdpat = cmdpat_base + ' --dbcfg=%s' % cnf_file
        addToCommandsList(commandslist, node['ip'], targetdir, 'cp -f %s %s' %(template_file, cnf_file))
        for confkey in configmap:
            addToCommandsList(commandslist, node['ip'], targetdir,
                cfgpat_st % (mach['basedir'], cnf_file, confkey, str(configmap[confkey])))
        cmd = cmdpat % (my_metaname, i, cluster_name, shard_id, i+1)
        # Install commands are stashed so that all primaries can be
        # emitted before any secondary (see the four loops below).
        if node.get('is_primary', False):
            mpries.append([node['ip'], targetdir, cmd])
        else:
            msecs.append([node['ip'], targetdir, cmd])
        addToDirMap(dirmap, node['ip'], node['data_dir_path'])
        addToDirMap(dirmap, node['ip'], node['log_dir_path'])
        addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
        addToDirMap(dirmap, node['ip'], node['tornado_sn_data_dir'])
        if autostart:
            generate_storage_service(config, machines, commandslist, node, storageidx, filesmap)
        i+=1
        storageidx += 1

    pries = []
    secs = []
    shard_extraopt = " --ha_mode=%s" % cluster['ha_mode']
    i=1
    template_file = "./template.cnf"
    if config['small']:
        template_file = "./template-small.cnf"
    for shard in datas:
        if not 'group_uuid' in shard:
            shard['group_uuid'] = getuuid()
        shard_id = "shard%d" % i
        my_shardname = "mysql_shard%d.json" % i
        shardf = open(r'install/%s' % my_shardname, 'w')
        json.dump(shard, shardf, indent=4)
        shardf.close()
        j = 0
        for node in shard['nodes']:
            mach = machines.get(node['ip'])
            configmap = get_data_config(node, shard, cluster)
            addNodeToFilesMap(filesmap, node, my_shardname, targetdir)
            if not cluster['enable_rocksdb']:
                addToCommandsList(commandslist, node['ip'], targetdir, 'sed -i /rocksdb/d %s' % template_file)
            cnf_file = "template_%d.cnf" % node['port']
            cmdpat = cmdpat_base + ' --dbcfg=%s' % cnf_file
            addToCommandsList(commandslist, node['ip'], targetdir, 'cp -f %s %s' %(template_file, cnf_file))
            for confkey in configmap:
                addToCommandsList(commandslist, node['ip'], targetdir,
                    cfgpat_st % (mach['basedir'], cnf_file, confkey, str(configmap[confkey])))
            cmd = cmdpat % (my_shardname, j, cluster_name, shard_id, j+1)
            if node.get('is_primary', False):
                pries.append([node['ip'], targetdir, cmd])
            else:
                secs.append([node['ip'], targetdir, cmd])
            addToDirMap(dirmap, node['ip'], node['data_dir_path'])
            addToDirMap(dirmap, node['ip'], node['log_dir_path'])
            addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
            addToDirMap(dirmap, node['ip'], node['tornado_sn_data_dir'])
            if autostart:
                generate_storage_service(config, machines, commandslist, node, storageidx, filesmap)
            j += 1
            storageidx += 1
        i+=1

    # Emit all primaries (meta first) before any secondary.
    for item in mpries:
        addToCommandsList(commandslist, item[0], item[1], item[2] + meta_extraopt, "storage")
    for item in pries:
        addToCommandsList(commandslist, item[0], item[1], item[2] + shard_extraopt, "storage")
    for item in msecs:
        addToCommandsList(commandslist, item[0], item[1], item[2] + meta_extraopt, "storage")
    for item in secs:
        addToCommandsList(commandslist, item[0], item[1], item[2] + shard_extraopt, "storage")

    comps = cluster['comp']['nodes']
    pg_compname = 'postgres_comp.json'
    compf = open(r'install/%s' % pg_compname, 'w')
    json.dump(comps, compf, indent=4)
    compf.close()

    # python2 install_pg.py --config=docker-comp.json --install_ids=1,2,3
    cfgpat_comp =  "bash %s/change_config.sh %s %s %s"
    targetdir="%s/scripts" % serverdir
    for node in comps:
        # fix: this loop previously reused the stale 'mach' left over from
        # the shard loop above, producing a wrong basedir whenever the
        # computing node lives on a different machine.
        mach = machines.get(node['ip'])
        configmap = get_comp_config(node, cluster['comp'])
        addNodeToFilesMap(filesmap, node, pg_compname, targetdir)
        conf_file = "postgresql_%d.conf" % node['port']
        addToCommandsList(commandslist, node['ip'], targetdir, "cp -f ../resources/postgresql.conf %s" % conf_file)
        for confkey in configmap:
            addToCommandsList(commandslist, node['ip'], targetdir,
                cfgpat_comp % (mach['basedir'], conf_file, confkey, str(configmap[confkey])))
        cmdpat = r'python2 install_pg.py  --config=./%s --install_ids=%d --template=%s'
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (pg_compname, node['id'], conf_file))
        addToDirMap(dirmap, node['ip'], node['datadir'])
        addToDirMap(dirmap, node['ip'], node['tornado_cn.data_dir'])

    # This only needs to be transferred to the machine creating the cluster.
    reg_metaname = 'reg_meta.json'
    metaf = open(r'install/%s' % reg_metaname, 'w')
    objs = []
    for node in meta['nodes']:
        obj = {}
        obj['is_primary'] = node.get('is_primary', False)
        obj['data_dir_path'] = node['data_dir_path']
        obj['ip'] = node['ip']
        obj['port'] = node['port']
        obj['user'] = "pgx"
        obj['password'] = "pgx_pwd"
        if 'master_priority' in node:
            obj['master_priority'] = node['master_priority']
        objs.append(obj)
    json.dump(objs, metaf, indent=4)
    metaf.close()

    # This only needs to be transferred to the machine creating the cluster.
    reg_shardname = 'reg_shards.json'
    shardf = open(r'install/%s' % reg_shardname, 'w')
    shards = []
    i=1
    for shard in datas:
        obj={'shard_name': "shard%d" % i}
        i+=1
        nodes=[]
        for node in shard['nodes']:
            n={'user':'pgx', 'password':'pgx_pwd'}
            n['ip'] = node['ip']
            n['port'] = node['port']
            if 'ro_weight' in node:
                n['ro_weight'] = node['ro_weight']
            if 'master_priority' in node:
                n['master_priority'] = node['master_priority']
            nodes.append(n)
        obj['shard_nodes'] = nodes
        shards.append(obj)
    json.dump(shards, shardf, indent=4)
    shardf.close()

    # Bootstrap and cluster registration run on the first computing node only.
    comp1 = comps[0]
    addNodeToFilesMap(filesmap, comp1, reg_metaname, targetdir)
    addNodeToFilesMap(filesmap, comp1, reg_shardname, targetdir)
    cmdpat=r'python2 bootstrap.py --config=./%s --bootstrap_sql=./meta_inuse.sql' + meta_extraopt
    if config['mariadb']:
        cmdpat=r'python2 bootstrap.py --config=./%s --bootstrap_sql=./meta_inuse_mariadb.sql' + meta_extraopt
    addToCommandsList(commandslist, comp1['ip'], targetdir, cmdpat % reg_metaname, "storage")
    cmdpat='python2 create_cluster.py --shards_config=./%s \
--comps_config=./%s  --meta_config=./%s --cluster_name=%s --meta_ha_mode=%s --ha_mode=%s --cluster_owner=abc --cluster_biz=%s'
    addToCommandsList(commandslist, comp1['ip'], targetdir,
        cmdpat % (reg_shardname, pg_compname, reg_metaname, cluster_name, meta['ha_mode'], cluster['ha_mode'], cluster_name), "all")

    # bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid clustermgr.cnf >& run.log </dev/null &
    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    metaseeds=",".join(meta_addrs)
    clmgrcnf = "%s/conf/cluster_mgr.cnf" % clustermgrdir
    cmdpat = "bash change_config.sh %s '%s' '%s'"
    startpat = 'bash start_cluster_mgr.sh </dev/null >& start.log &'
    initmember = "%s:%d:0," % (clmgrnodes[0]['ip'], clmgrnodes[0]['brpc_raft_port'])
    clustermgridx = 0
    for node in clmgrnodes:
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'meta_group_seeds', metaseeds))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'brpc_raft_port', node['brpc_raft_port']))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'brpc_http_port', node['brpc_http_port']))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'local_ip', node['ip']))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'raft_group_member_init_config', initmember))
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, startpat)
        if autostart:
            generate_clustermgr_service(config, machines, commandslist, node, clustermgridx, filesmap)
        clustermgridx += 1

    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        generate_haproxy_config(jscfg['cluster'], machines, 'install', 'haproxy.cfg')
        cmdpat = r'haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg >& haproxy.log'
        addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
        if autostart:
            generate_haproxy_service(config, machines, commandslist, haproxy, filesmap)

    # Optional user-supplied SQL, run once against the first computing node
    # after a configurable settle delay.
    initobj = cluster.get("initialization", None)
    initfile = "auto_init.sql"
    if initobj is not None:
        initsqlf = open("install/%s" % initfile, 'w')
        for sqlc in initobj.get("sqlcommands", []):
            initsqlf.write(sqlc)
            initsqlf.write(";\n")
        initsqlf.close()
        node = comps[0]
        waitTime = initobj.get("waitseconds", 10)
        addNodeToFilesMap(filesmap, node, initfile, ".")
        cmdpat = r'sleep %s; psql -f %s postgres://%s:%s@%s:%s/postgres'
        addToCommandsList(commandslist, node['ip'], ".",
            cmdpat % (str(waitTime), initfile, node['user'], node['password'], 'localhost', str(node['port'])), "computing")

    com_name = 'commands.sh'
    comf = open(r'install/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    comf.write("cat /dev/null > runlog\n")
    comf.write("cat /dev/null > lastlog\n")
    comf.write("trap 'cat lastlog' DEBUG\n")
    comf.write("trap 'exit 1' ERR\n")

    # Per-machine setup: base dir, tarballs, helper files and env.sh.
    for ip in machines:
        mach = machines.get(ip)
        if usesudo:
            process_command_noenv(comf, config, machines, ip, '/',
                'sudo mkdir -p %s && sudo chown -R %s:\`id -gn %s\` %s' % (mach['basedir'],
                    mach['user'], mach['user'], mach['basedir']))
        else:
            process_command_noenv(comf, config, machines, ip, '/', 'mkdir -p %s' % mach['basedir'])
        # Set up the files
        if not iscloud:
            process_file(comf, config, machines, ip, '%s.tgz' % storagedir, mach['basedir'])
            process_file(comf, config, machines, ip, '%s.tgz' % serverdir, mach['basedir'])
            process_file(comf, config, machines, ip, '%s.tgz' % clustermgrdir, mach['basedir'])
            process_command_noenv(comf, config, machines, ip, mach['basedir'], 'tar -xzf %s.tgz' % storagedir)
            process_command_noenv(comf, config, machines, ip, mach['basedir'], 'tar -xzf %s.tgz' % serverdir)
            process_command_noenv(comf, config, machines, ip, mach['basedir'], 'tar -xzf %s.tgz' % clustermgrdir)
            if 'haproxy' in cluster:
                process_file(comf, config, machines, ip, 'haproxy-2.5.0-bin.tar.gz', mach['basedir'])
                process_command_noenv(comf, config, machines, ip, mach['basedir'], 'tar -xzf haproxy-2.5.0-bin.tar.gz')

        # files
        flist = [
                    ['build_driver_forpg.sh', '%s/resources' % serverdir],
                    ['build_driver_formysql.sh', '%s/resources' % storagedir],
                    [reg_metaname, '%s/scripts' % serverdir],
                    ['process_deps.sh', '.'],
                    ['change_config.sh', '.']
                ]
        for fpair in flist:
            process_file(comf, config, machines, ip, 'install/%s' % fpair[0], "%s/%s" % (mach['basedir'], fpair[1]))
        if 'haproxy' in cluster:
            process_file(comf, config, machines, ip, 'install/haproxy.cfg', mach['basedir'])

        # Set up the env.sh, this must be before 'process_command_setenv'
        process_file(comf, config, machines, ip, 'install/env.sh.template', mach['basedir'])
        extstr = "sed -s 's#KUNLUN_BASEDIR#%s#g' env.sh.template > env.sh" % mach['basedir']
        process_command_noenv(comf, config, machines, ip, mach['basedir'], extstr)
        extstr = "sed -i 's#KUNLUN_VERSION#%s#g' env.sh" % config['product_version']
        process_command_noenv(comf, config, machines, ip, mach['basedir'], extstr)

        comstr = "bash ../../process_deps.sh"
        process_command_setenv(comf, config, machines, ip, "%s/lib" % storagedir, comstr, "storage")
        process_command_setenv(comf, config, machines, ip, "%s/lib" % serverdir, comstr, "computing")

        comstr = "bash build_driver_formysql.sh %s"
        process_command_setenv(comf, config, machines, ip, "%s/resources" % storagedir, comstr % mach['basedir'], "storage")
        comstr = "bash build_driver_forpg.sh %s"
        process_command_setenv(comf, config, machines, ip, "%s/resources" % serverdir, comstr % mach['basedir'], "all")

        # Truncate any stale instance registry left from a previous install.
        comstr = "cd %s || exit 1; test -d etc && echo > etc/instances_list.txt 2>/dev/null; exit 0" % serverdir
        process_command_noenv(comf, config, machines, ip, mach['basedir'], comstr)
        comstr = "cd %s || exit 1; test -d etc && echo > etc/instances_list.txt 2>/dev/null; exit 0" % storagedir
        process_command_noenv(comf, config, machines, ip, mach['basedir'], comstr)

    # dir making
    process_dirmap(comf, dirmap, machines, usesudo, config)
    # files copy.
    process_filesmap(comf, filesmap, machines, 'install', config)
    # The reason for not using commands map is that, we need to keep the order for the commands.
    process_commandslist_setenv(comf, config, machines, commandslist)
    output_info(comf, "Installation completed !")
    comf.close()

def generate_systemctl_start(servname, ip, commandslist):
    """Queue a 'systemctl start' of the given unit on the given host."""
    addToCommandsList(commandslist, ip, '/', "sudo systemctl start %s" % servname)

# The order is meta shard -> data shards -> cluster_mgr -> comp nodes
def generate_start_scripts(jscfg, args):
    """Generate start/commands.sh which starts an already-installed cluster.

    Start order: meta shard -> data shards -> cluster_mgr -> computing
    nodes -> haproxy (optional).

    jscfg: parsed cluster configuration.
    args: command line arguments, forwarded to the global config helpers.
    """
    init_global_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines)
    validate_and_set_config1(jscfg, machines)
    config = jscfg['config']

    storagedir = "kunlun-storage-%s" % config['product_version']
    serverdir = "kunlun-server-%s" % config['product_version']
    clustermgrdir = "kunlun-cluster-manager-%s" % config['product_version']
    autostart = config['autostart']
    usesudo = config['sudo']

    valgrindopt = ""
    if config['valgrind']:
        valgrindopt = "--valgrind"

    commandslist = []

    cluster = jscfg['cluster']
    meta = cluster['meta']
    # commands like:
    # bash startmysql.sh [port]
    targetdir='%s/dba_tools' % storagedir
    for node in meta['nodes']:
        cmdpat = r'bash startmysql.sh %s'
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")

    # bash startmysql.sh [port]
    targetdir='%s/dba_tools' % storagedir
    datas = cluster['data']
    for shard in datas:
        # fix: this loop body was indented with tabs (TabError under python3).
        for node in shard['nodes']:
            cmdpat = r'bash startmysql.sh %s'
            addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")

    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash start_cluster_mgr.sh </dev/null >& run.log &'
    for node in clmgrnodes:
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, cmdpat)

    # su postgres -c "python2 start_pg.py --port=5401"
    comps = cluster['comp']['nodes']
    targetdir="%s/scripts" % serverdir
    for node in comps:
        cmdpat = r'python2 start_pg.py --port=%d %s'
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (node['port'], valgrindopt), "computing")

    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        cmdpat = r'haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg >& haproxy.log'
        addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)

    com_name = 'commands.sh'
    comf = open(r'start/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    comf.write("cat /dev/null > runlog\n")
    comf.write("cat /dev/null > lastlog\n")
    comf.write("trap 'cat lastlog' DEBUG\n")
    process_commandslist_setenv(comf, config, machines, commandslist)
    output_info(comf, "Start action completed !")
    comf.close()

def generate_systemctl_stop(servname, ip, commandslist):
    """Queue a 'systemctl stop' of the given unit on the given host."""
    addToCommandsList(commandslist, ip, '/', "sudo systemctl stop %s" % servname)

# The order is: comp-nodes -> cluster_mgr -> data shards -> meta shard
def generate_stop_scripts(jscfg, args):
    """Generate stop/commands.sh which stops a running cluster.

    Stop order is the reverse of start: haproxy (optional) -> computing
    nodes -> cluster_mgr -> data shards -> meta shard.

    jscfg: parsed cluster configuration.
    args: command line arguments, forwarded to the global config helpers.
    """
    init_global_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines)
    validate_and_set_config1(jscfg, machines)
    config = jscfg['config']

    storagedir = "kunlun-storage-%s" % config['product_version']
    serverdir = "kunlun-server-%s" % config['product_version']
    clustermgrdir = "kunlun-cluster-manager-%s" % config['product_version']
    autostart = config['autostart']
    usesudo = config['sudo']

    commandslist = []
    cluster = jscfg['cluster']

    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        cmdpat="cat haproxy-%d.pid | xargs kill -9" % haproxy['port']
        addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)

    # pg_ctl -D %s stop"
    comps = cluster['comp']['nodes']
    targetdir="%s/scripts" % serverdir
    for node in comps:
        cmdpat = r'pg_ctl -D %s stop -m immediate'
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['datadir'], "computing")

    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash stop_cluster_mgr.sh'
    for node in clmgrnodes:
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, cmdpat)

    # bash stopmysql.sh [port]
    targetdir='%s/dba_tools' % storagedir
    datas = cluster['data']
    for shard in datas:
        # fix: this loop body was indented with tabs (TabError under python3).
        for node in shard['nodes']:
            cmdpat = r'bash stopmysql.sh %d'
            addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")

    meta = cluster['meta']
    # commands like:
    # mysqladmin --defaults-file=/kunlun/kunlun-storage-$version/etc/my_6001.cnf -uroot -proot shutdown
    targetdir='%s/dba_tools' % storagedir
    for node in meta['nodes']:
        cmdpat = r'bash stopmysql.sh %d'
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")

    com_name = 'commands.sh'
    comf = open(r'stop/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    comf.write("cat /dev/null > runlog\n")
    comf.write("cat /dev/null > lastlog\n")
    comf.write("trap 'cat lastlog' DEBUG\n")
    process_commandslist_setenv(comf, config, machines, commandslist)
    output_info(comf, "Stop action completed !")
    comf.close()

def generate_systemctl_clean(servname, ip, commandslist):
    """Queue the commands that stop, disable and delete the systemd unit
    for the given service on the given host."""
    for pattern in ("sudo systemctl stop %s",
                    "sudo systemctl disable %s",
                    "sudo rm -f /usr/lib/systemd/system/%s"):
        addToCommandsList(commandslist, ip, '/', pattern % servname)

# The order is: comp-nodes -> cluster_mgr -> data shards -> meta shard
def generate_clean_scripts(jscfg, args):
    """Generate clean/commands.sh which stops the cluster and removes its
    data, logs, systemd units and (unless running in cloud mode) everything
    under each machine's basedir.

    Order: haproxy (optional) -> computing nodes -> cluster_mgr -> data
    shards -> meta shard -> basedir wipe.

    jscfg: parsed cluster configuration.
    args: command line arguments, forwarded to the global config helpers.
    """
    init_global_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines)
    validate_and_set_config1(jscfg, machines)
    config = jscfg['config']

    storagedir = "kunlun-storage-%s" % config['product_version']
    serverdir = "kunlun-server-%s" % config['product_version']
    clustermgrdir = "kunlun-cluster-manager-%s" % config['product_version']
    autostart = config['autostart']
    usesudo = config['sudo']

    sudopfx=""
    if usesudo:
        sudopfx="sudo "
    iscloud = config['cloud']

    # Stop commands need the cluster env; rm/systemctl cleanup does not,
    # so the two kinds are collected separately.
    env_cmdlist = []
    noenv_cmdlist = []
    cluster = jscfg['cluster']

    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        cmdpat="cat haproxy-%d.pid | xargs kill -9" % haproxy['port']
        addToCommandsList(noenv_cmdlist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
        cmdpat="rm -f haproxy-%d.pid"  % haproxy['port']
        addToCommandsList(noenv_cmdlist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
        if autostart:
            servname = 'kunlun-haproxy-%d.service' % haproxy['port']
            generate_systemctl_clean(servname, haproxy['ip'], noenv_cmdlist)

    # pg_ctl -D %s stop"
    comps = cluster['comp']['nodes']
    targetdir="%s/scripts" % serverdir
    for node in comps:
        cmdpat = r'pg_ctl -D %s stop -m immediate'
        addToCommandsList(env_cmdlist, node['ip'], targetdir, cmdpat % node['datadir'], "computing")
        cmdpat = r'%srm -fr %s'
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['datadir']))
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['tornado_cn.data_dir']))
        if autostart:
            servname = 'kunlun-server-%d.service' % node['port']
            generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)

    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash stop_cluster_mgr.sh'
    for node in clmgrnodes:
        addToCommandsList(env_cmdlist, node['ip'], "%s/bin" % clustermgrdir, cmdpat)
        if autostart:
            servname = 'kunlun-cluster-manager-%d.service' % node['brpc_raft_port']
            generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)

    # bash stopmysql.sh [port]
    targetdir='%s/dba_tools' % storagedir
    datas = cluster['data']
    for shard in datas:
        # fix: this loop body was indented with tabs (TabError under python3).
        for node in shard['nodes']:
            cmdpat = r'bash stopmysql.sh %d'
            addToCommandsList(env_cmdlist, node['ip'], targetdir, cmdpat % node['port'], "storage")
            cmdpat = r'%srm -fr %s'
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['log_dir_path']))
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['data_dir_path']))
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['innodb_log_dir_path']))
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['tornado_sn_data_dir']))
            if autostart:
                servname = 'kunlun-storage-%d.service' % node['port']
                generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)

    meta = cluster['meta']
    # commands like:
    # mysqladmin --defaults-file=/kunlun/kunlun-storage-$version/etc/my_6001.cnf -uroot -proot shutdown
    targetdir='%s/dba_tools' % storagedir
    for node in meta['nodes']:
        cmdpat = r'bash stopmysql.sh %d'
        addToCommandsList(env_cmdlist, node['ip'], targetdir, cmdpat % node['port'], "storage")
        cmdpat = r'%srm -fr %s'
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['log_dir_path']))
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['data_dir_path']))
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['innodb_log_dir_path']))
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['tornado_sn_data_dir']))
        if autostart:
            servname = 'kunlun-storage-%d.service' % node['port']
            generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)

    # Wipe the installed software itself only for non-cloud deployments.
    if not iscloud:
        for ip in machines:
            mach = machines[ip]
            cmdpat = '%srm -fr %s/*'
            addToCommandsList(noenv_cmdlist, ip, ".", cmdpat % (sudopfx, mach['basedir']))

    com_name = 'commands.sh'
    comf = open(r'clean/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    comf.write("cat /dev/null > runlog\n")
    comf.write("cat /dev/null > lastlog\n")
    comf.write("trap 'cat lastlog' DEBUG\n")
    process_commandslist_setenv(comf, config, machines, env_cmdlist)
    process_commandslist_noenv(comf, config, machines, noenv_cmdlist)
    output_info(comf, "Clean action completed !")
    comf.close()

# The order is meta shard -> data shards -> cluster_mgr -> comp nodes
def generate_check_scripts(jscfg, args):
    """Generate check/commands.sh plus per-node file copies to verify a cluster.

    For every storage node (meta and data shards), cluster_manager node and
    computing node, a check_*.sh helper is scheduled for copy and invocation.
    The check order follows the comment above: meta shard -> data shards ->
    cluster_mgr -> comp nodes.

    jscfg: the parsed cluster config json object (mutated by the init helpers).
    args:  the parsed command-line arguments.
    """
    init_global_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines)
    validate_and_set_config1(jscfg, machines)
    config = jscfg['config']

    filesmap = {}
    commandslist = []

    cluster = jscfg['cluster']
    meta = cluster['meta']
    meta_hamode = meta['ha_mode']
    cluster_hamode = cluster['ha_mode']

    # commands like:
    # bash check_storage.sh [host] [port] [ha_mode]
    targetdir = '.'

    cmdpat = r'bash check_storage.sh %s %s %s'
    for node in meta['nodes']:
        addNodeToFilesMap(filesmap, node, 'check/check_storage.sh', targetdir)
        addToCommandsList(commandslist, node['ip'], targetdir,
                cmdpat % (node['ip'], str(node['port']), meta_hamode), "storage")

    # Data shards are checked with the cluster-level ha_mode, not the meta one.
    for shard in cluster['data']:
        for node in shard['nodes']:
            addNodeToFilesMap(filesmap, node, 'check/check_storage.sh', targetdir)
            addToCommandsList(commandslist, node['ip'], targetdir,
                    cmdpat % (node['ip'], str(node['port']), cluster_hamode), "storage")

    # commands like:
    # bash check_cluster_manager.sh [basedir]
    clmgrnodes = cluster['clustermgr']['nodes']
    cmdpat = r'bash check_cluster_manager.sh %s'
    for node in clmgrnodes:
        addNodeToFilesMap(filesmap, node, 'check/check_cluster_manager.sh', targetdir)
        mach = machines.get(node['ip'])
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (mach['basedir']), "clustermgr")

    # commands like
    # bash check_server.sh [port] [user] [password]
    comps = cluster['comp']['nodes']
    cmdpat = r'bash check_server.sh %s %s %s'
    for node in comps:
        addNodeToFilesMap(filesmap, node, 'check/check_server.sh', targetdir)
        addToCommandsList(commandslist, node['ip'], targetdir,
                cmdpat % (node['port'], node['user'], node['password']), "computing")

    com_name = 'commands.sh'
    comf = open(r'check/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    comf.write("cat /dev/null > runlog\n")
    comf.write("cat /dev/null > lastlog\n")
    # Echo the last command's log whenever a command runs, for live feedback.
    comf.write("trap 'cat lastlog' DEBUG\n")

    # files copy.
    process_filesmap(comf, filesmap, machines, '.', config)
    process_commandslist_setenv(comf, config, machines, commandslist)
    output_info(comf, "Check action completed !")
    comf.close()

# These packages should be put in contrib/x86_64.
# If an entry is a gzip of a docker image, the second item is the image name;
# if it is a gzip of a directory, the second item is the directory name after decompression.
def get_x86_64_3rdpackages_filemap():
    """Third-party package map for x86_64.

    Maps a package name to [tarball file name, name after decompression
    (image name or directory name)].
    """
    filemap = {}
    filemap["haproxy"] = ["haproxy-2.5.0-bin.tar.gz", "haproxy-2.5.0-bin"]
    return filemap

def get_aarch64_3rdpackages_filemap():
    """Third-party package map for aarch64.

    Maps a package name to [tarball file name, name after decompression
    (image name or directory name)].
    """
    filemap = {}
    filemap["haproxy"] = ["haproxy-2.5.0-bin.tar.gz", "haproxy-2.5.0-bin"]
    return filemap

def get_arch_3rdpackages_filemap(config):
    """Return the third-party package map for config['targetarch'].

    Raises ValueError for an unsupported architecture.
    """
    arch = config['targetarch']
    if arch == 'x86_64':
        return get_x86_64_3rdpackages_filemap()
    if arch == 'aarch64':
        return get_aarch64_3rdpackages_filemap()
    # not ready for loongarch64, etc
    raise ValueError('bad arch: %s' % arch)

def download_packages(args, jscfg = None):
    """Download the kunlun binary tarballs and the third-party packages.

    args:  parsed command-line arguments (uses args.config and args.mariadb).
    jscfg: optional pre-parsed cluster config; when None it is loaded from
           args.config (or left empty if no config path was given).
    """
    if jscfg is None:
        jscfg = {} if args.config == '' else get_json_from_file(args.config)
    init_global_config1(jscfg, args)
    config = jscfg['config']
    arch = config['targetarch']
    prodver = config['product_version']
    downtype = config['downloadtype']
    downbase = get_downloadbase(config['downloadsite'])
    targetdir = "."
    # Content types accepted as a valid package response.
    contentTypes = set(['application/x-gzip', 'application/octet-stream', 'unknown'])
    pkgtype = "professional" if config['professional'] else "enterprise"
    # download the binary packages
    for name in ("kunlun-storage", "kunlun-server", "kunlun-cluster-manager"):
        fname = "%s-%s.tgz" % (name, prodver)
        if downtype == 'release':
            fpath = "releases_%s/%s/release-binaries/%s" % (arch, prodver, pkgtype)
        elif downtype == 'daily_rel':
            fpath = "dailybuilds_%s/%s" % (arch, pkgtype)
        else:
            fpath = "dailybuilds_debug_%s/%s" % (arch, pkgtype)
        # The mariadb-based storage lives in a subdirectory.
        if args.mariadb and name == 'kunlun-storage':
            fpath = fpath + "/mariadb"
        fpath = fpath + "/" + fname
        download_file(downbase, fpath, contentTypes, targetdir, config['overwrite'])
    # Third-party packages live under contrib/<arch>/.
    for finfo in get_arch_3rdpackages_filemap(config).values():
        fpath = 'contrib/%s/%s' % (arch, finfo[0])
        download_file(downbase, fpath, contentTypes, targetdir, config['overwrite'])

def run_command(args):
    """Run an ad-hoc command against the cluster nodes via runDriver.

    Builds a cluster object (meta / data / comp, plus cluster_manager when
    configured) and a run-argument dict from the command-line options.
    """
    jscfg = get_json_from_file(args.config)
    init_global_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines)
    validate_and_set_config1(jscfg, machines)
    cluster = jscfg['cluster']
    clusterobj = {
            'meta': cluster['meta'],
            'data': cluster['data'],
            'comp': cluster['comp']
            }
    # The clustermgr section is optional. NOTE(review): the original tested
    # "'clustermgr' in jscfg" but read cluster['clustermgr']; the section is
    # nested under jscfg['cluster'] elsewhere in this file, so test the
    # cluster dict here.
    if 'clustermgr' in cluster:
        clusterobj['cluster_manager'] = cluster['clustermgr']
    runarg = {
            "runtype": args.runtype,
            "dryrun": args.dryrun,
            "shard": args.shard,
            "index": args.index,
            "command": args.command
            }
    runDriver(runarg, clusterobj, machines, args.command)

if __name__ == '__main__':
    defconfig = get_default_config1()
    actions = ["download", "install", "start", "stop", "clean", "check", "run"]
    parser = argparse.ArgumentParser(description='Specify the arguments.')
    parser.add_argument('--action', type=str, help="The action", required=True, choices=actions)
    parser.add_argument('--config', type=str, help="The cluster config path", default="")
    # general environment and product version for all actions.
    parser.add_argument('--product_version', type=str, help="kunlun version", default=defconfig['product_version'])
    parser.add_argument('--defuser', type=str, help="the default user", default=defconfig['defuser'])
    parser.add_argument('--defbase', type=str, help="the default basedir", default=defconfig['defbase'])
    # config affects how to run actions.
    parser.add_argument('--autostart', help="whether to start the cluster automatically",
            default=defconfig['autostart'], action='store_true')
    parser.add_argument('--sudo', help="whether to use sudo", default=defconfig['sudo'], action='store_true')
    parser.add_argument('--localip', type=str, help="The local ip address", default=defconfig['localip'])
    parser.add_argument('--cloud', help="whether run on cloud images", default=defconfig['cloud'], action='store_true')
    parser.add_argument('--small', help="whether to use small template", default=defconfig['small'], action='store_true')
    parser.add_argument('--valgrind', help="whether to use valgrind", default=defconfig['valgrind'], action='store_true')
    # cluster_manager config
    parser.add_argument('--defbrpc_raft_port', type=int, help="default brpc_raft_port for cluster_manager",
            default=defconfig['defbrpc_raft_port'])
    # NOTE: help text fixed; it previously said brpc_raft_port (copy-paste).
    parser.add_argument('--defbrpc_http_port', type=int, help="default brpc_http_port for cluster_manager",
            default=defconfig['defbrpc_http_port'])
    # used by install action to also include download action first.
    # NOTE: help text fixed; it previously duplicated the --overwrite help.
    parser.add_argument('--download', help="whether to download the packages first during install",
            default=defconfig['download'], action='store_true')
    # used only by download action
    parser.add_argument('--downloadsite', type=str, help="the download base site", choices=['public', 'devsite', 'internal'],
            default=defconfig['downloadsite'])
    parser.add_argument('--downloadtype', type=str, help="the packages type", choices=['release', 'daily_rel', 'daily_debug'],
            default=defconfig['downloadtype'])
    parser.add_argument('--targetarch', type=str, help="the cpu arch for the packages to download/install",
            default=defconfig['targetarch'])
    parser.add_argument('--overwrite', help="whether to overwrite existing file during download",
            default=defconfig['overwrite'], action='store_true')
    parser.add_argument('--professional', help="whether to download the professional version",
            default=defconfig['professional'], action='store_true')
    parser.add_argument('--rc', help="whether to download the release candidate version", default=defconfig['rc'], action='store_true')
    parser.add_argument('--mariadb', help="whether to use klustron-storage-mariadb", default=defconfig['mariadb'], action='store_true')
    # used only for run action
    parser.add_argument('--runtype', type=str, help="The run type", choices=['hosts', 'meta', 'data', 'comp', 'sqlmeta', 'sqldata', 'sqlcomp'])
    parser.add_argument('--dryrun', help="do not run the command, just show", default=False, action='store_true')
    parser.add_argument('--shard', type=int, help="The shard number, only used for data/sqldata, start from 1", default=0)
    parser.add_argument('--index', type=int, help="The index number, start from 1", default=0)
    parser.add_argument('--command', type=str, help="the command")
    # used for distributing license
    parser.add_argument('--license_file', type=str, help="the name of the license file", default=defconfig['license_file'])
    # used for storing files for machine info
    parser.add_argument('--infodir', type=str, help="the directory to store the machine info files", default=defconfig['infodir'])

    args = parser.parse_args()
    if not args.defbase.startswith('/'):
        raise ValueError('Error: the default basedir must be absolute path!')
    # autostart installs systemd unit files, which requires sudo.
    if args.autostart:
        args.sudo = True

    # download needs no cluster config, so it is handled before config loading.
    if args.action == 'download':
        download_packages(args)
        sys.exit(0)

    if args.config == '':
        args.config = "legacy_config.json"

    checkdirs(actions)
    my_print(str(sys.argv))

    jscfg = get_json_from_file(args.config)
    if args.action == 'install':
        if args.download:
            download_packages(args, jscfg)
        generate_install_scripts(jscfg, args)
    elif args.action == 'start':
        generate_start_scripts(jscfg, args)
    elif args.action == 'stop':
        generate_stop_scripts(jscfg, args)
    elif args.action == 'clean':
        generate_clean_scripts(jscfg, args)
    elif args.action == 'check':
        generate_check_scripts(jscfg, args)
    elif args.action == 'run':
        run_command(args)
    else:
        # Unreachable in practice: argparse restricts --action to `actions`.
        usage()
        sys.exit(1)
