From 9fd82122840e9b572ebbd1bb42f21ab5f1278766 Mon Sep 17 00:00:00 2001
From: Allison Piper
Date: Tue, 23 Apr 2024 15:42:39 +0000
Subject: [PATCH] Log formatting

---
 ci/compute-matrix.py | 61 ++++++++++++++++++++++++--------------------
 ci/matrix.yaml       | 16 +++++++-----
 2 files changed, 42 insertions(+), 35 deletions(-)

diff --git a/ci/compute-matrix.py b/ci/compute-matrix.py
index 9795ca91b81..c783d48d9d1 100755
--- a/ci/compute-matrix.py
+++ b/ci/compute-matrix.py
@@ -372,7 +372,7 @@ def finalize_workflow_dispatch_groups(workflow_dispatch_groups_orig):
             two_stage_json.append({'producers': producers, 'consumers': consumers})
         group_json['two_stage'] = two_stage_json
 
-    # Check for any duplicate job names in standalone arrays. Warn and remove duplicates.
+    # Check for any duplicate jobs in standalone arrays. Warn and remove duplicates.
     for group_name, group_json in workflow_dispatch_groups.items():
         standalone_jobs = group_json['standalone'] if 'standalone' in group_json else []
         unique_standalone_jobs = []
@@ -426,7 +426,7 @@ def finalize_workflow_dispatch_groups(workflow_dispatch_groups_orig):
 
     # Natural sort impl (handles embedded numbers in strings, case insensitive)
     def natural_sort_key(key):
-        return [int(text) if text.isdigit() else text.lower() for text in re.split('(\d+)', key)]
+        return [(int(text) if text.isdigit() else text.lower()) for text in re.split('(\d+)', key)]
 
     # Sort the dispatch groups by name:
     workflow_dispatch_groups = dict(sorted(workflow_dispatch_groups.items(), key=lambda x: natural_sort_key(x[0])))
@@ -439,26 +439,6 @@ def natural_sort_key(key):
             group_json['two_stage'] = sorted(
                 group_json['two_stage'], key=lambda x: natural_sort_key(x['producers'][0]['name']))
 
-    # Count the total number of jobs:
-    print(f"::begin-group::Job list", file=sys.stderr)
-    total_jobs = 0
-    for group_name, group_json in workflow_dispatch_groups.items():
-        if 'standalone' in group_json:
-            for job_json in group_json['standalone']:
-                total_jobs += 1
-                print(f"{total_jobs} - {group_name}: {job_json['name']}", file=sys.stderr)
-        if 'two_stage' in group_json:
-            for two_stage_json in group_json['two_stage']:
-                for job_json in two_stage_json['producers']:
-                    total_jobs += 1
-                    print(f"{total_jobs} - {group_name}: {job_json['name']}", file=sys.stderr)
-                for job_json in two_stage_json['consumers']:
-                    total_jobs += 1
-                    print(f"{total_jobs} - {group_name}: {job_json['name']}", file=sys.stderr)
-
-    print(f"::end-group::", file=sys.stderr)
-    print(f"Total jobs: {total_jobs}", file=sys.stderr)
-
     # Check to see if any .two_stage.producers arrays have more than 1 job, which is not supported. See ci-dispatch-two-stage.yml for details.
     for group_name, group_json in workflow_dispatch_groups.items():
         if 'two_stage' in group_json:
@@ -476,6 +456,34 @@ def natural_sort_key(key):
 
     return workflow_dispatch_groups
 
+
+def pretty_print_workflow(final_workflow, outfile):
+    print(f"::group::Job list", file=outfile)
+
+    def print_job_array(total_jobs, key, group_json):
+        job_array = group_json[key] if key in group_json else []
+        key += ":"
+        for job_json in job_array:
+            total_jobs += 1
+            print(f"{total_jobs:4} {key:13} {job_json['name']}", file=outfile)
+        return total_jobs
+
+    total_jobs = 0
+    for group_name, group_json in final_workflow.items():
+        print(f"{'':4} {group_name}:", file=outfile)
+        total_jobs = print_job_array(total_jobs, 'standalone', group_json)
+        if 'two_stage' in group_json:
+            for two_stage_json in group_json['two_stage']:
+                total_jobs = print_job_array(total_jobs, 'producers', two_stage_json)
+                total_jobs = print_job_array(total_jobs, 'consumers', two_stage_json)
+    print(f"::endgroup::", file=outfile)
+    print(f"Total jobs: {total_jobs}", file=outfile)
+
+    print("::group::Final Workflow JSON", file=outfile)
+    print(json.dumps(final_workflow, indent=2), file=outfile)
+    print("::endgroup::", file=outfile)
+
+
 def print_gha_workflow(args):
     matrix_jobs = preprocess_matrix_jobs(matrix_yaml['workflows'][args.workflow])
 
@@ -494,10 +502,7 @@ def print_gha_workflow(args):
 
     final_workflow = finalize_workflow_dispatch_groups(workflow_dispatch_groups)
 
-    # Pretty print the workflow json to stderr:
-    print("::group::Final Workflow", file=sys.stderr)
-    print(json.dumps(final_workflow, indent=2), file=sys.stderr)
-    print("::end-group::", file=sys.stderr)
+    pretty_print_workflow(final_workflow, sys.stderr)
 
     # Print a single-line, compact version of the workflow json to stdout:
     write_output("WORKFLOW", json.dumps(final_workflow, indent=None, separators=(',', ':')))
@@ -506,7 +511,7 @@ def print_gha_workflow(args):
 
 
 def print_devcontainer_info(args):
-    devcontiner_version = matrix_yaml['devcontainer_version']
+    devcontainer_version = matrix_yaml['devcontainer_version']
 
     matrix_jobs = []
     for workflow in matrix_yaml['workflows']:
@@ -532,7 +537,7 @@ def print_devcontainer_info(args):
         combo['cuda'] = combo['ctk']
         del combo['ctk']
 
-    devcontainer_json = {'devcontainer_version': devcontiner_version, 'combinations': unique_combinations}
+    devcontainer_json = {'devcontainer_version': devcontainer_version, 'combinations': unique_combinations}
 
     # Pretty print the devcontainer json to stdout:
     print(json.dumps(devcontainer_json, indent=2))
diff --git a/ci/matrix.yaml b/ci/matrix.yaml
index ad468ffb05a..4848ccb760a 100644
--- a/ci/matrix.yaml
+++ b/ci/matrix.yaml
@@ -132,28 +132,30 @@ testing_pool_gpus:
 #
 workflows:
   pull_request:
-  - {job_types: ['build'], ctk: *ctk_prev_min, host_compiler: [*msvc2017, *gcc6], std: [11, 14] }
+  # default_projects: nvcc
+  - {job_types: ['build'], ctk: *ctk_prev_min, host_compiler: *gcc6, std: [11, 14] }
   - {job_types: ['build'], ctk: *ctk_prev_min, host_compiler: [*gcc7, *gcc8, *gcc9, *llvm9], std: [11, 14, 17] }
+  - {job_types: ['build'], ctk: *ctk_prev_min, host_compiler: *msvc2017, std: 14 }
   - {job_types: ['build'], ctk: *ctk_prev_max, host_compiler: *gcc11, std: [11, 14, 17], cmake_cuda_arch: '60;70;80;90'}
   - {job_types: ['build'], ctk: *ctk_curr, host_compiler: [*gcc7, *gcc8, *gcc9], std: [11, 14, 17] }
   - {job_types: ['build'], ctk: *ctk_curr, host_compiler: [*gcc10, *gcc11], std: [11, 14, 17, 20] }
   - {job_types: ['build'], ctk: *ctk_curr, host_compiler: [*llvm9, *llvm10], std: [11, 14, 17] }
   - {job_types: ['build'], ctk: *ctk_curr, host_compiler: [*llvm11, *llvm12, *llvm13], std: [11, 14, 17, 20] }
   - {job_types: ['build'], ctk: *ctk_curr, host_compiler: [*llvm14, *llvm15], std: [11, 14, 17, 20] }
-  - {job_types: ['build'], ctk: *ctk_curr, host_compiler: *msvc2019, std: [14, 17] }
-  - {job_types: ['build'], ctk: *ctk_curr, host_compiler: *msvc2022, std: [14, 17, 20] }
-  - {job_types: ['build'], ctk: *ctk_curr, host_compiler: *oneapi, std: [11, 14, 17] }
   - {job_types: ['build'], ctk: *ctk_curr, host_compiler: [*gcc12, *llvm16], std: [11, 14, 17, 20], cpu: 'arm64'}
   - {job_types: ['test'], ctk: *ctk_curr, host_compiler: [*gcc12, *llvm16], std: [11, 14, 17, 20], cmake_cuda_arch: '60;70;80;90'}
-  # clang-cuda:
+  - {job_types: ['build'], ctk: *ctk_curr, host_compiler: *oneapi, std: [11, 14, 17] }
+  - {job_types: ['build'], ctk: *ctk_curr, host_compiler: *msvc2019, std: [14, 17] }
+  - {job_types: ['build'], ctk: *ctk_curr, host_compiler: *msvc2022, std: [14, 17, 20] }
+  # default_projects: clang-cuda
   - {job_types: ['build'], device_compiler: *llvm-newest, host_compiler: *llvm-newest, std: [17, 20]}
   # nvrtc:
   - {job_types: ['nvrtc'], project: 'libcudacxx', ctk: *ctk_curr, host_compiler: *gcc12, std: [11, 14, 17, 20]}
+  # verify-codegen:
+  - { job_types: ['verify_codegen'], project: 'libcudacxx'}
   # cccl-infra:
   - {job_types: ['infra'], project: 'cccl', ctk: *ctk_prev_min, host_compiler: [*gcc-oldest, *llvm-oldest]}
   - {job_types: ['infra'], project: 'cccl', ctk: *ctk_curr, host_compiler: [*gcc-newest, *llvm-newest]}
-  # verify-codegen:
-  - { job_types: ['verify_codegen'], project: 'libcudacxx'}
   nightly:
   - {job_types: ['test'], ctk: *ctk_prev_min, gpu: 'v100', cmake_cuda_arch: '70', host_compiler: *gcc6, std: [11] }
   - {job_types: ['test'], ctk: *ctk_prev_min, gpu: 't4', cmake_cuda_arch: '75', host_compiler: *llvm9, std: [17] }
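
The natural-sort key that finalize_workflow_dispatch_groups uses to order dispatch groups and jobs can be exercised on its own. A minimal standalone sketch, assuming only the Python standard library; the names in the example list are illustrative:

import re

# Same idea as natural_sort_key above: runs of digits compare numerically,
# everything else compares case-insensitively, so 'gcc9' sorts before 'gcc10'.
def natural_sort_key(key):
    return [(int(text) if text.isdigit() else text.lower()) for text in re.split(r'(\d+)', key)]

names = ['llvm16', 'gcc10', 'GCC7', 'gcc9']
print(sorted(names, key=natural_sort_key))
# -> ['GCC7', 'gcc9', 'gcc10', 'llvm16']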