#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0

"""
tdc.py - Linux tc (Traffic Control) unit test driver

Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
"""

import re
import os
import sys
import argparse
import importlib
import json
import subprocess
import time
import traceback
from collections import OrderedDict
from string import Template

from tdc_config import *
from tdc_helper import *

import TdcPlugin
from TdcResults import *

class PluginDependencyException(Exception):
    def __init__(self, missing_pg):
        self.missing_pg = missing_pg

class PluginMgrTestFail(Exception):
    def __init__(self, stage, output, message):
        self.stage = stage
        self.output = output
        self.message = message

class PluginMgr:
    def __init__(self, argparser):
        super().__init__()
        self.plugins = {}
        self.plugin_instances = []
        self.failed_plugins = {}
        self.argparser = argparser

        # TODO, put plugins in order
        plugindir = os.getenv('TDC_PLUGIN_DIR', './plugins')
        for dirpath, dirnames, filenames in os.walk(plugindir):
            for fn in filenames:
                if (fn.endswith('.py') and
                    not fn == '__init__.py' and
                    not fn.startswith('#') and
                    not fn.startswith('.#')):
                    mn = fn[0:-3]
                    foo = importlib.import_module('plugins.' + mn)
                    self.plugins[mn] = foo
                    self.plugin_instances.append(foo.SubPlugin())

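    # Usage note (a sketch, not enforced by the code): plugins are discovered
    # automatically from TDC_PLUGIN_DIR (default ./plugins) in __init__ above,
    # while load_plugin() below pulls in plugins named by a test case's
    # "plugins"/"requires" stanza.  An illustrative override of the search
    # directory might look like:
    #   TDC_PLUGIN_DIR=./plugin-lib-custom ./tdc.py -v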
    def load_plugin(self, pgdir, pgname):
        pgname = pgname[0:-3]
        foo = importlib.import_module('{}.{}'.format(pgdir, pgname))
        self.plugins[pgname] = foo
        self.plugin_instances.append(foo.SubPlugin())
        self.plugin_instances[-1].check_args(self.args, None)

    def get_required_plugins(self, testlist):
        '''
        Get all required plugins from the list of test cases and return
        all unique items.
        '''
        reqs = []
        for t in testlist:
            try:
                if 'requires' in t['plugins']:
                    if isinstance(t['plugins']['requires'], list):
                        reqs.extend(t['plugins']['requires'])
                    else:
                        reqs.append(t['plugins']['requires'])
            except KeyError:
                continue
        reqs = get_unique_item(reqs)
        return reqs

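    # For reference, the stanza consumed above and below looks roughly like
    # this in a test case file (plugin names are illustrative only):
    #   "plugins": { "requires": "nsPlugin" }
    # or, with several dependencies:
    #   "plugins": { "requires": [ "nsPlugin", "scapyPlugin" ] }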
    def load_required_plugins(self, reqs, parser, args, remaining):
        '''
        Get all required plugins from the list of test cases and load any plugin
        that is not already enabled.
        '''
        pgd = ['plugin-lib', 'plugin-lib-custom']
        pnf = []

        for r in reqs:
            if r not in self.plugins:
                fname = '{}.py'.format(r)
                source_path = []
                for d in pgd:
                    pgpath = '{}/{}'.format(d, fname)
                    if os.path.isfile(pgpath):
                        source_path.append(pgpath)
                if len(source_path) == 0:
                    print('ERROR: unable to find required plugin {}'.format(r))
                    pnf.append(fname)
                    continue
                elif len(source_path) > 1:
                    print('WARNING: multiple copies of plugin {} found, using version found'.format(r))
                    print('at {}'.format(source_path[0]))
                pgdir = source_path[0]
                pgdir = pgdir.split('/')[0]
                self.load_plugin(pgdir, fname)
        if len(pnf) > 0:
            raise PluginDependencyException(pnf)

        parser = self.call_add_args(parser)
        (args, remaining) = parser.parse_known_args(args=remaining, namespace=args)
        return args

    def call_pre_suite(self, testcount, testidlist):
        for pgn_inst in self.plugin_instances:
            pgn_inst.pre_suite(testcount, testidlist)

    def call_post_suite(self, index):
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_suite(index)

    def call_pre_case(self, caseinfo, *, test_skip=False):
        for pgn_inst in self.plugin_instances:
            try:
                pgn_inst.pre_case(caseinfo, test_skip)
            except Exception as ee:
                print('exception {} in call to pre_case for {} plugin'.
                      format(ee, pgn_inst.__class__))
                print('testid is {}'.format(caseinfo['id']))
                raise

    def call_post_case(self):
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_case()

    def call_pre_execute(self):
        for pgn_inst in self.plugin_instances:
            pgn_inst.pre_execute()

    def call_post_execute(self):
        for pgn_inst in reversed(self.plugin_instances):
            pgn_inst.post_execute()

    def call_add_args(self, parser):
        for pgn_inst in self.plugin_instances:
            parser = pgn_inst.add_args(parser)
        return parser

    def call_check_args(self, args, remaining):
        for pgn_inst in self.plugin_instances:
            pgn_inst.check_args(args, remaining)

    def call_adjust_command(self, stage, command):
        for pgn_inst in self.plugin_instances:
            command = pgn_inst.adjust_command(stage, command)
        return command

    def set_args(self, args):
        self.args = args

    def _make_argparser(self, args):
        self.argparser = argparse.ArgumentParser(
            description='Linux TC unit tests')

def replace_keywords(cmd):
    """
    For a given executable command, substitute any known
    variables contained within NAMES with the correct values
    """
    tcmd = Template(cmd)
    subcmd = tcmd.safe_substitute(NAMES)
    return subcmd

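# Example of the substitution performed by replace_keywords() above; the keys
# come from NAMES in tdc_config.py and the expanded values are install
# specific (shown here purely for illustration):
#   "$TC qdisc show dev $DEV2 ingress"
#     -> "/sbin/tc qdisc show dev <your-test-device> ingress"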

def exec_cmd(args, pm, stage, command):
    """
    Perform any required modifications on an executable command, then run
    it in a subprocess and return the results.
    """
    if len(command.strip()) == 0:
        return None, None
    if '$' in command:
        command = replace_keywords(command)

    command = pm.call_adjust_command(stage, command)
    if args.verbose > 0:
        print('command "{}"'.format(command))
    proc = subprocess.Popen(command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=ENVIR)

    try:
        (rawout, serr) = proc.communicate(timeout=NAMES['TIMEOUT'])
        if proc.returncode != 0 and len(serr) > 0:
            foutput = serr.decode("utf-8", errors="ignore")
        else:
            foutput = rawout.decode("utf-8", errors="ignore")
    except subprocess.TimeoutExpired:
        foutput = "Command \"{}\" timed out\n".format(command)
        # make sure the timed-out child is reaped before reporting failure
        proc.kill()
        proc.wait()
        proc.returncode = 255

    proc.stdout.close()
    proc.stderr.close()
    return proc, foutput

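# Note: exec_cmd() is invoked with one of four stage names that plugins can
# key off of in adjust_command(): 'setup', 'execute', 'verify' and 'teardown'.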
def prepare_env(args, pm, stage, prefix, cmdlist, output=None):
    """
    Execute the setup/teardown commands for a test case.
    Optionally terminate test execution if the command fails.
    """
    if args.verbose > 0:
        print('{}'.format(prefix))
    for cmdinfo in cmdlist:
        if isinstance(cmdinfo, list):
            exit_codes = cmdinfo[1:]
            cmd = cmdinfo[0]
        else:
            exit_codes = [0]
            cmd = cmdinfo

        if not cmd:
            continue

        (proc, foutput) = exec_cmd(args, pm, stage, cmd)

        if proc and (proc.returncode not in exit_codes):
            print('', file=sys.stderr)
            print("{} *** Could not execute: \"{}\"".format(prefix, cmd),
                  file=sys.stderr)
            print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
                  file=sys.stderr)
            print("returncode {}; expected {}".format(proc.returncode,
                                                      exit_codes),
                  file=sys.stderr)
            print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
            print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
            print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
            raise PluginMgrTestFail(
                stage, output,
                '"{}" did not complete successfully'.format(prefix))

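# run_one_test() below consumes one test case dictionary loaded from JSON.
# The fields it reads are roughly the following (values are illustrative
# only, not taken from a real test):
#   {
#     "id": "1f2b", "name": "Add an example qdisc", "category": ["example"],
#     "plugins": { "requires": "nsPlugin" },           # optional
#     "skip": "yes",                                   # optional
#     "setup": [ "..." ],
#     "cmdUnderTest": "$TC ...",
#     "expExitCode": "0",
#     "verifyCmd": "$TC qdisc show ...",
#     "matchPattern": "...",
#     "matchCount": "1",
#     "teardown": [ "..." ]
#   }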
def run_one_test(pm, args, index, tidx):
    global NAMES
    result = True
    tresult = ""
    tap = ""
    res = TestResult(tidx['id'], tidx['name'])
    if args.verbose > 0:
        print("\t====================\n=====> ", end="")
    print("Test " + tidx["id"] + ": " + tidx["name"])

    if 'skip' in tidx:
        if tidx['skip'] == 'yes':
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg('Test case designated as skipped.')
            pm.call_pre_case(tidx, test_skip=True)
            pm.call_post_execute()
            return res

    # populate NAMES with TESTID for this test
    NAMES['TESTID'] = tidx['id']

    pm.call_pre_case(tidx)
    prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])

    if (args.verbose > 0):
        print('-----> execute stage')
    pm.call_pre_execute()
    (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
    if p:
        exit_code = p.returncode
    else:
        exit_code = None

    pm.call_post_execute()

    if (exit_code is None or exit_code != int(tidx["expExitCode"])):
        print("exit: {!r}".format(exit_code))
        print("exit: {}".format(int(tidx["expExitCode"])))
        #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
        res.set_result(ResultState.fail)
        res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx["expExitCode"], procout))
        print(procout)
    else:
        if args.verbose > 0:
            print('-----> verify stage')
        match_pattern = re.compile(
            str(tidx["matchPattern"]), re.DOTALL | re.MULTILINE)
        (p, procout) = exec_cmd(args, pm, 'verify', tidx["verifyCmd"])
        if procout:
            match_index = re.findall(match_pattern, procout)
            if len(match_index) != int(tidx["matchCount"]):
                res.set_result(ResultState.fail)
                res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
            else:
                res.set_result(ResultState.success)
        elif int(tidx["matchCount"]) != 0:
            res.set_result(ResultState.fail)
            res.set_failmsg('No output generated by verify command.')
        else:
            res.set_result(ResultState.success)

    prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
    pm.call_post_case()

    index += 1

    # remove TESTID from NAMES
    del NAMES['TESTID']
    return res

def test_runner(pm, args, filtered_tests):
    """
    Driver function for the unit tests.

    Prints information about the tests being run, executes the setup and
    teardown commands and the command under test itself. Also determines
    success/failure based on the information in the test case and generates
    TAP output accordingly.
    """
    testlist = filtered_tests
    tcount = len(testlist)
    index = 1
    tap = ''
    badtest = None
    stage = None
    emergency_exit = False
    emergency_exit_message = ''

    tsr = TestSuiteReport()

    try:
        pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist])
    except Exception as ee:
        ex_type, ex, ex_tb = sys.exc_info()
        print('Exception {} {} (caught in pre_suite).'.
              format(ex_type, ex))
        traceback.print_tb(ex_tb)
        emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
        emergency_exit = True
        stage = 'pre-SUITE'

    if emergency_exit:
        pm.call_post_suite(index)
        return emergency_exit_message
    if args.verbose > 1:
        print('give test rig 2 seconds to stabilize')
    time.sleep(2)
    for tidx in testlist:
        if "flower" in tidx["category"] and args.device is None:
            if args.verbose > 1:
                print('Not executing test {} {} because DEV2 not defined'.
                      format(tidx['id'], tidx['name']))
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg('Not executed because DEV2 is not defined')
            tsr.add_resultdata(res)
            continue
        try:
            badtest = tidx  # in case it goes bad
            res = run_one_test(pm, args, index, tidx)
            tsr.add_resultdata(res)
        except PluginMgrTestFail as pmtf:
            ex_type, ex, ex_tb = sys.exc_info()
            stage = pmtf.stage
            message = pmtf.message
            output = pmtf.output
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg(pmtf.message)
            res.set_failmsg(pmtf.output)
            tsr.add_resultdata(res)
            index += 1
            print(message)
            print('Exception {} {} (caught in test_runner, running test {} {} {} stage {})'.
                  format(ex_type, ex, index, tidx['id'], tidx['name'], stage))
            print('---------------')
            print('traceback')
            traceback.print_tb(ex_tb)
            print('---------------')
            if stage == 'teardown':
                print('accumulated output for this test:')
                if pmtf.output:
                    print(pmtf.output)
            print('---------------')
            break
        index += 1

    # if we failed in setup or teardown,
    # fill in the remaining tests with ok-skipped
    count = index

    if tcount + 1 != count:
        for tidx in testlist[count - 1:]:
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            msg = 'skipped - previous {} failed {} {}'.format(stage,
                index, badtest.get('id', '--Unknown--'))
            res.set_errormsg(msg)
            tsr.add_resultdata(res)
            count += 1

    if args.pause:
        print('Want to pause\nPress enter to continue ...')
        if input():
            print('got something on stdin')

    pm.call_post_suite(index)

    return tsr

def has_blank_ids(idlist):
    """
    Search the list for empty ID fields and return true/false accordingly.
    """
    return not all(k for k in idlist)


def load_from_file(filename):
    """
    Open the JSON file containing the test cases and return them
    as list of ordered dictionary objects.
    """
    try:
        with open(filename) as test_data:
            testlist = json.load(test_data, object_pairs_hook=OrderedDict)
    except json.JSONDecodeError as jde:
        print('IGNORING test case file {}\n\tBECAUSE:  {}'.format(filename, jde))
        testlist = list()
    else:
        idlist = get_id_list(testlist)
        if (has_blank_ids(idlist)):
            for k in testlist:
                k['filename'] = filename
    return testlist

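# Note: each test case file is expected to hold a JSON array of test case
# objects; see the field sketch above run_one_test() for the keys tdc reads.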

def args_parse():
    """
    Create the argument parser.
    """
    parser = argparse.ArgumentParser(description='Linux TC unit tests')
    return parser


def set_args(parser):
    """
    Set the command line arguments for tdc.
    """
    parser.add_argument(
        '--outfile', type=str,
        help='Path to the file in which results should be saved. ' +
        'Default target is the current directory.')
    parser.add_argument(
        '-p', '--path', type=str,
        help='The full path to the tc executable to use')
    sg = parser.add_argument_group(
        'selection', 'select which test cases: ' +
        'files plus directories; filtered by categories plus testids')
    ag = parser.add_argument_group(
        'action', 'select action to perform on selected test cases')

    sg.add_argument(
        '-D', '--directory', nargs='+', metavar='DIR',
        help='Collect tests from the specified directory(ies) ' +
        '(default [tc-tests])')
    sg.add_argument(
        '-f', '--file', nargs='+', metavar='FILE',
        help='Run tests from the specified file(s)')
    sg.add_argument(
        '-c', '--category', nargs='*', metavar='CATG', default=['+c'],
        help='Run tests only from the specified category/ies, ' +
        'or if no category/ies is/are specified, list known categories.')
    sg.add_argument(
        '-e', '--execute', nargs='+', metavar='ID',
        help='Execute the specified test cases with specified IDs')
    ag.add_argument(
        '-l', '--list', action='store_true',
        help='List all test cases, or those only within the specified category')
    ag.add_argument(
        '-s', '--show', action='store_true', dest='showID',
        help='Display the selected test cases')
    ag.add_argument(
        '-i', '--id', action='store_true', dest='gen_id',
        help='Generate ID numbers for new test cases')
    parser.add_argument(
        '-v', '--verbose', action='count', default=0,
        help='Show the commands that are being run')
    parser.add_argument(
        '--format', default='tap', const='tap', nargs='?',
        choices=['none', 'xunit', 'tap'],
        help='Specify the format for test results. (Default: TAP)')
    parser.add_argument('-d', '--device',
                        help='Execute the test case in flower category')
    parser.add_argument(
        '-P', '--pause', action='store_true',
        help='Pause execution just before post-suite stage')
    return parser

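# A few illustrative invocations built from the options above (file names,
# device names and test IDs are examples only):
#   ./tdc.py -c                    # list the known test categories
#   ./tdc.py -c flower -d eth1     # flower tests need a device (-d / DEV2)
#   ./tdc.py -f tc-tests/example.json -e 1f2b --format xunit -v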

def check_default_settings(args, remaining, pm):
    """
    Process any arguments overriding the default settings,
    and ensure the settings are correct.
    """
    # Allow for overriding specific settings
    global NAMES

    if args.path is not None:
        NAMES['TC'] = args.path
    if args.device is not None:
        NAMES['DEV2'] = args.device
    if 'TIMEOUT' not in NAMES:
        NAMES['TIMEOUT'] = None
    if not os.path.isfile(NAMES['TC']):
        print("The specified tc path " + NAMES['TC'] + " does not exist.")
        exit(1)

    pm.call_check_args(args, remaining)


def get_id_list(alltests):
    """
    Generate a list of all IDs in the test cases.
    """
    return [x["id"] for x in alltests]


def check_case_id(alltests):
    """
    Check for duplicate test case IDs.
    """
    idl = get_id_list(alltests)
    return [x for x in idl if idl.count(x) > 1]


def does_id_exist(alltests, newid):
    """
    Check if a given ID already exists in the list of test cases.
    """
    idl = get_id_list(alltests)
    return (any(newid == x for x in idl))


def generate_case_ids(alltests):
    """
    If a test case has a blank ID field, generate a random hex ID for it
    and then write the test cases back to disk.
    """
    import random
    for c in alltests:
        if (c["id"] == ""):
            while True:
                newid = str('{:04x}'.format(random.randrange(16**4)))
                if (does_id_exist(alltests, newid)):
                    continue
                else:
                    c['id'] = newid
                    break

    ufilename = []
    for c in alltests:
        if ('filename' in c):
            ufilename.append(c['filename'])
    ufilename = get_unique_item(ufilename)
    for f in ufilename:
        testlist = []
        for t in alltests:
            if 'filename' in t:
                if t['filename'] == f:
                    del t['filename']
                    testlist.append(t)
        with open(f, "w") as outfile:
            json.dump(testlist, outfile, indent=4)
            outfile.write("\n")

def filter_tests_by_id(args, testlist):
    '''
    Remove tests from testlist that are not in the named id list.
    If id list is empty, return empty list.
    '''
    newlist = list()
    if testlist and args.execute:
        target_ids = args.execute

        if isinstance(target_ids, list) and (len(target_ids) > 0):
            newlist = list(filter(lambda x: x['id'] in target_ids, testlist))
    return newlist

def filter_tests_by_category(args, testlist):
    '''
    Remove tests from testlist that are not in a named category.
    '''
    answer = list()
    if args.category and testlist:
        test_ids = list()
        for catg in set(args.category):
            if catg == '+c':
                continue
            print('considering category {}'.format(catg))
            for tc in testlist:
                if catg in tc['category'] and tc['id'] not in test_ids:
                    answer.append(tc)
                    test_ids.append(tc['id'])

    return answer


def get_test_cases(args):
    """
    If a test case file is specified, retrieve tests from that file.
    Otherwise, glob for all json files in subdirectories and load from
    each one.
    Also, if requested, filter by category, and add tests matching
    certain ids.
    """
    import fnmatch

    flist = []
    testdirs = ['tc-tests']

    if args.file:
        # at least one file was specified - remove the default directory
        testdirs = []

        for ff in args.file:
            if not os.path.isfile(ff):
                print("IGNORING file " + ff + "\n\tBECAUSE does not exist.")
            else:
                flist.append(os.path.abspath(ff))

    if args.directory:
        testdirs = args.directory

    for testdir in testdirs:
        for root, dirnames, filenames in os.walk(testdir):
            for filename in fnmatch.filter(filenames, '*.json'):
                candidate = os.path.abspath(os.path.join(root, filename))
                if candidate not in testdirs:
                    flist.append(candidate)

    alltestcases = list()
    for casefile in flist:
        alltestcases = alltestcases + (load_from_file(casefile))

    allcatlist = get_test_categories(alltestcases)
    allidlist = get_id_list(alltestcases)

    testcases_by_cats = get_categorized_testlist(alltestcases, allcatlist)
    idtestcases = filter_tests_by_id(args, alltestcases)
    cattestcases = filter_tests_by_category(args, alltestcases)

    cat_ids = [x['id'] for x in cattestcases]
    if args.execute:
        if args.category:
            alltestcases = cattestcases + [x for x in idtestcases if x['id'] not in cat_ids]
        else:
            alltestcases = idtestcases
    else:
        if cat_ids:
            alltestcases = cattestcases
        else:
            # just accept the existing value of alltestcases,
            # which has been filtered by file/directory
            pass

    return allcatlist, allidlist, testcases_by_cats, alltestcases

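# Selection summary (descriptive only): -f/-D narrow the candidate files,
# then -c and -e narrow the cases; when both are given, get_test_cases()
# above merges the two result sets without duplicating IDs.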

def set_operation_mode(pm, parser, args, remaining):
    """
    Load the test case data and process remaining arguments to determine
    what the script should do for this run, and call the appropriate
    function.
    """
    ucat, idlist, testcases, alltests = get_test_cases(args)

    if args.gen_id:
        if (has_blank_ids(idlist)):
            generate_case_ids(alltests)
        else:
            print("No empty ID fields found in test files.")
        exit(0)

    duplicate_ids = check_case_id(alltests)
    if (len(duplicate_ids) > 0):
        print("The following test case IDs are not unique:")
        print(str(set(duplicate_ids)))
        print("Please correct them before continuing.")
        exit(1)

    if args.showID:
        for atest in alltests:
            print_test_case(atest)
        exit(0)

    if isinstance(args.category, list) and (len(args.category) == 0):
        print("Available categories:")
        print_sll(ucat)
        exit(0)

    if args.list:
        list_test_cases(alltests)
        exit(0)

    if len(alltests):
        req_plugins = pm.get_required_plugins(alltests)
        try:
            args = pm.load_required_plugins(req_plugins, parser, args, remaining)
        except PluginDependencyException as pde:
            print('The following plugins were not found:')
            print('{}'.format(pde.missing_pg))
        catresults = test_runner(pm, args, alltests)
        if args.format == 'none':
            print('Test results output suppression requested\n')
        else:
            print('\nAll test results: \n')
            if args.format == 'xunit':
                suffix = 'xml'
                res = catresults.format_xunit()
            elif args.format == 'tap':
                suffix = 'tap'
                res = catresults.format_tap()
            print(res)
            print('\n\n')
            if not args.outfile:
                fname = 'test-results.{}'.format(suffix)
            else:
                fname = args.outfile
            with open(fname, 'w') as fh:
                fh.write(res)
                if os.getenv('SUDO_UID') is not None:
                    os.chown(fname, uid=int(os.getenv('SUDO_UID')),
                        gid=int(os.getenv('SUDO_GID')))
    else:
        print('No tests found\n')

def main():
    """
    Start of execution; set up argument parser and get the arguments,
    and start operations.
    """
    parser = args_parse()
    parser = set_args(parser)
    pm = PluginMgr(parser)
    parser = pm.call_add_args(parser)
    (args, remaining) = parser.parse_known_args()
    args.NAMES = NAMES
    pm.set_args(args)
    check_default_settings(args, remaining, pm)
    if args.verbose > 2:
        print('args is {}'.format(args))

    set_operation_mode(pm, parser, args, remaining)

    exit(0)


if __name__ == "__main__":
    main()