#!/usr/bin/python
"""
  Script to run NEO test suite using Nexedi's test node framework.
"""
import argparse, sys, subprocess, os
import traceback
import taskdistribution
import re

# pattern to get test counts from stdout
SUMMARY_RE = re.compile( \
  r'^(.*)Summary (.*) (?P<test_count>\d+) (.*) (?P<error_count>\d+) (.*) (?P<expected_count>\d+) (.*) (?P<skip_count>\d+|\.) (.*) (?P<duration>\d+(\.\d*)?|\.\d+)s', \
  re.MULTILINE)

# NEO specific environment
TEMP_DIRECTORY  = '{{neo_temp_directory}}/neotest_tmp'
MYSQL_UNIX_PORT = '{{my_cnf_parameters.socket}}'
NEO_TESTS_ADAPTER = 'MySQL'
RUN_NEO_TESTS_COMMAND = '{{ neotestrunner }}'

def parseTestStdOut(data):
  """ 
  Parse output of NEO testrunner script.
  """
  search = SUMMARY_RE.search(data)
  if search:
    groupdict = search.groupdict()
    test_count = int(groupdict['test_count'])
    duration = float(groupdict['duration'])
    error_count = int(groupdict['error_count'])
    expected_count = int(groupdict['expected_count'])
    try:
      # it can match '.'!
      skip_count = int(groupdict['skip_count'])
    except ValueError:
      skip_count = 0
  return test_count, error_count, expected_count, skip_count, duration

def main():
  parser = argparse.ArgumentParser(description='Run a test suite.')
  parser.add_argument('--test_suite', help='The test suite name')
  parser.add_argument('--test_suite_title', help='The test suite title',
                      default=None)
  parser.add_argument('--test_node_title', help='The test node title',
                      default=None)
  parser.add_argument('--project_title', help='The project title',
                      default=None)
  parser.add_argument('--revision', help='The revision to test',
                      default='dummy_revision')
  parser.add_argument('--node_quantity', help='Number of parallel tests to run',
                      default=1, type=int)
  parser.add_argument('--master_url',
                      help='The Url of Master controling many suites',
                      default=None)
  parser.add_argument('--db_list', help='A list of sql connection strings')
  # parameters that needs to be passed to runUnitTest
  parser.add_argument('--conversion_server_hostname', default=None)
  parser.add_argument('--conversion_server_port', default=None)
  parser.add_argument('--volatile_memcached_server_hostname', default=None)
  parser.add_argument('--volatile_memcached_server_port', default=None)
  parser.add_argument('--persistent_memcached_server_hostname', default=None)
  parser.add_argument('--persistent_memcached_server_port', default=None)
  parser.add_argument('--bt5_path', default=None)
  parser.add_argument("--xvfb_bin", default=None)
  parser.add_argument("--firefox_bin", default=None)

  args = parser.parse_args()

  test_suite_title = args.test_suite_title or args.test_suite
  revision = args.revision

  # XXX: think of good name as it represents al tests inside NEO
  # one way is to examing all test files avaliable and register them
  # but then when parsing we must report each test as a test line - i.e.
  # we execute test one by one, collect output and parse and report to Nexedi ERP5.
  test_name_list = ['testNEO']

  tool = taskdistribution.TaskDistributionTool(portal_url = args.master_url)
  test_result = tool.createTestResult(revision = revision,
                                      test_name_list = test_name_list,
                                      node_title = args.test_node_title,
                                      test_title = test_suite_title,
                                      project_title = args.project_title)
  if test_result is not None:
    # run NEO tests
    test_result_line = test_result.start()
    command = "%s -ufz" %RUN_NEO_TESTS_COMMAND
    args = [RUN_NEO_TESTS_COMMAND, '-ufz']
    stdin = file(os.devnull)
    env = {'TEMP': TEMP_DIRECTORY, 
           'MYSQL_UNIX_PORT': MYSQL_UNIX_PORT,
           'NEO_TESTS_ADAPTER': NEO_TESTS_ADAPTER}
    try:
      p = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, env=env)
    except Exception:
      # Catch any exception here, to warn user instead of beeing silent,
      # by generating fake error result
      result = dict(status_code=-1,
                    command=command,
                    stderr=traceback.format_exc(),
                    stdout='')
      raise EnvironmentError(result)

    # parse test stdout / stderr, hint to speed up use files first!
    data = p.stdout.read()
    test_count, error_count, expected_count, skip_count, duration = parseTestStdOut(data)
    print test_result_line

    # report status back to Nexedi ERP5
    test_result_line.stop(
        test_count = test_count, 
        error_count = error_count, 
        failure_count = expected_count,
        skip_count = skip_count, 
        duration = duration, 
        date = None, # XXX: date when test run or when finished ? 
        command = command,
        stdout= data, 
        stderr='stderr', # XXX:catch it
        html_test_result='')

if __name__ == "__main__":
    main()