Commit f3576f7

Integrated workflow and test results dashboard.

Integrated the workflow and test results dashboard, and appended the post-merge cron job.

Signed-off-by: Mani Deepak Gurram <manigurr@qti.qualcomm.com>

1 parent 7461781

16 files changed: 1482 additions & 0 deletions

.github/actions/aws_s3_helper/action.yml

Lines changed: 108 additions & 0 deletions

@@ -0,0 +1,108 @@
name: AWS S3 Helper
description: Upload and download files from AWS S3

inputs:
  s3_bucket:
    description: S3 Bucket Name
    required: true
  local_file:
    description: Local file paths
    required: false
    default: ../artifacts/file_list.txt
  download_file:
    description: Download file paths
    required: false
    default: ''
  download_location:
    description: File download location
    required: false
    default: .
  mode:
    description: Mode of operation (single-upload/multi-upload/download)
    required: true
    default: single-upload
  upload_location:
    description: Upload location
    required: true

outputs:
  presigned_url:
    description: Pre-signed URL for the uploaded file
    value: ${{ steps.sync-data.outputs.presigned_url }}
  s3_location:
    description: Upload location
    value: ${{ inputs.upload_location }}

runs:
  using: "composite"
  steps:
    - name: Sync Data
      id: sync-data
      shell: bash
      env:
        UPLOAD_LOCATION: ${{ inputs.upload_location }}
      run: |
        echo "::group::Uploading files to S3"
        case "${{ inputs.mode }}" in
          multi-upload)
            if [ ! -s "${{ inputs.local_file }}" ]; then
              echo "❌ File list is empty. No files to upload."
              exit 1
            fi

            echo "📄 Contents of file list:"
            cat "${{ inputs.local_file }}"

            # Build a JSON manifest mapping each uploaded filename to its pre-signed URL
            first_line=true
            manifest="${{ github.workspace }}/presigned_urls.json"
            echo "{" > "${manifest}"

            while IFS= read -r file; do
              resolved_file=$(readlink -f "$file")
              if [ -f "$resolved_file" ]; then
                filename=$(basename "$resolved_file")
                echo "📤 Uploading $filename..."
                aws s3 cp "$resolved_file" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename"
                presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename" --expires-in 259200)

                if [ "$first_line" = true ]; then
                  first_line=false
                else
                  echo "," >> "${manifest}"
                fi

                # Key = filename, Value = presigned_url
                echo " \"${filename}\": \"${presigned_url}\"" >> "${manifest}"
                echo "✅ Pre-signed URL for $filename: $presigned_url"
              else
                echo "⚠️ Skipping: $file is not a regular file or not accessible."
              fi
            done < "${{ inputs.local_file }}"

            echo "}" >> "${manifest}"
            ;;
          single-upload)
            resolved_file=$(readlink -f "${{ inputs.local_file }}")
            filename=$(basename "$resolved_file")
            aws s3 cp "$resolved_file" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename"
            presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename" --expires-in 259200)
            echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT"
            ;;
          download)
            download_dir=$(realpath "${{ inputs.download_location }}")
            aws s3 cp "s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }}" "$download_dir"
            ;;
          *)
            echo "Invalid mode. Use 'single-upload', 'multi-upload', or 'download'."
            exit 1
            ;;
        esac
        echo "::endgroup::"

    - name: Upload presigned URL manifest
      if: ${{ inputs.mode == 'multi-upload' }}
      uses: actions/upload-artifact@v4
      with:
        name: presigned_urls.json
        path: ${{ github.workspace }}/presigned_urls.json
        retention-days: 3
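
For reference, a minimal sketch of a workflow step that invokes this action in single-upload mode and consumes its presigned_url output. The action reference and bucket name are the ones used elsewhere in this commit; the step id, log path, and upload_location value are illustrative:

steps:
  - name: Upload build log
    id: upload_log
    uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.main
    with:
      s3_bucket: qli-prd-video-gh-artifacts
      local_file: ../artifacts/build.log          # illustrative path
      mode: single-upload
      upload_location: qualcomm-linux/video-driver/artifacts/logs   # illustrative path

  - name: Show pre-signed URL
    shell: bash
    run: echo "Log uploaded, link valid for 3 days: ${{ steps.upload_log.outputs.presigned_url }}"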

.github/actions/build/action.yml

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
name: Build Workspace
description: |
  Builds kernel and video-driver using a Docker image.

inputs:
  docker_image:
    description: Docker image to use
    required: true
  workspace_path:
    description: Path to workspace directory
    required: true

runs:
  using: "composite"
  steps:
    - name: Build kernel
      shell: bash
      run: |
        docker run --rm \
          -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \
          -w "${{ inputs.workspace_path }}/kernel" \
          --user $(id -u):$(id -g) \
          ${{ inputs.docker_image }} \
          bash -c "
            make O=../kobj ARCH=arm64 defconfig &&
            make O=../kobj -j\$(nproc) &&
            make O=../kobj -j\$(nproc) dir-pkg INSTALL_MOD_STRIP=1
          "

    - name: Build video-driver
      shell: bash
      run: |
        docker run --rm \
          -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \
          -w "${{ inputs.workspace_path }}/video-driver" \
          --user $(id -u):$(id -g) \
          ${{ inputs.docker_image }} \
          bash -c "
            make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \
              -C ${{ inputs.workspace_path }}/kobj \
              M=\$(pwd) VIDEO_KERNEL_ROOT=\$(pwd) modules
          "
Lines changed: 196 additions & 0 deletions
@@ -0,0 +1,196 @@
name: LAVA Job Render
inputs:
  docker_image:
    description: Docker image
    required: true
    default: kmake-image:ver.1.0

runs:
  using: "composite"
  steps:
    - name: Process presigned_urls.json
      id: process_urls
      uses: actions/github-script@v7
      with:
        script: |
          const fs = require('fs');
          const p = require('path');

          const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
          if (!fs.existsSync(filePath)) {
            core.setFailed(`File not found: ${filePath}`);
            return;
          }

          // Read JSON mapping of uploaded file paths -> presigned URLs
          const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));

          function findUrlByFilename(filename) {
            for (const [path, url] of Object.entries(data)) {
              if (path.endsWith(filename)) return url;
            }
            return null;
          }

          const modulesTarUrl = findUrlByFilename('modules.tar.xz');
          const imageUrl = findUrlByFilename('Image');
          const mergedRamdiskUrl = findUrlByFilename('video-merged.cpio.gz');
          const vmlinuxUrl = findUrlByFilename('vmlinux');

          // DTB is expected to be "<MACHINE>.dtb"
          const dtbFilename = `${process.env.MACHINE}.dtb`;
          const dtbUrl = findUrlByFilename(dtbFilename);

          core.setOutput('modules_url', modulesTarUrl || '');
          core.setOutput('image_url', imageUrl || '');
          core.setOutput('vmlinux_url', vmlinuxUrl || '');
          core.setOutput('dtb_url', dtbUrl || '');
          core.setOutput('merged_ramdisk_url', mergedRamdiskUrl || '');

          console.log(`Modules URL: ${modulesTarUrl}`);
          console.log(`Image URL: ${imageUrl}`);
          console.log(`Vmlinux URL: ${vmlinuxUrl}`);
          console.log(`Dtb URL: ${dtbUrl}`);
          console.log(`Merged Ramdisk URL: ${mergedRamdiskUrl}`);

    - name: Create metadata.json
      id: create_metadata
      shell: bash
      run: |
        echo "Creating metadata.json from job_render templates"
        cd ../job_render
        # Inject the DTB pre-signed URL into the metadata template
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
          ${{ inputs.docker_image }} \
          jq '.artifacts["dtbs/qcom/${{ env.MACHINE }}.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json

    - name: Upload metadata.json
      id: upload_metadata
      uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.main
      with:
        local_file: ../job_render/data/metadata.json
        s3_bucket: qli-prd-video-gh-artifacts
        mode: single-upload

    - name: Create template json cloudData.json
      shell: bash
      run: |
        echo "Populating cloudData.json with kernel, vmlinux, modules, metadata, ramdisk"
        metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
        image_url="${{ steps.process_urls.outputs.image_url }}"
        vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
        modules_url="${{ steps.process_urls.outputs.modules_url }}"
        merged_ramdisk_url="${{ steps.process_urls.outputs.merged_ramdisk_url }}"

        cd ../job_render

        # metadata
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e metadata_url="$metadata_url" \
          ${{ inputs.docker_image }} \
          jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json

        # kernel Image
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e image_url="$image_url" \
          ${{ inputs.docker_image }} \
          jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json

        # vmlinux (set only if present)
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e vmlinux_url="$vmlinux_url" \
          ${{ inputs.docker_image }} \
          sh -c 'if [ -n "$vmlinux_url" ]; then jq ".artifacts.vmlinux = env.vmlinux_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi'

        # modules
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e modules_url="$modules_url" \
          ${{ inputs.docker_image }} \
          jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json

        # ramdisk: use merged only here (fallback added in next step if missing)
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e merged_ramdisk_url="$merged_ramdisk_url" \
          ${{ inputs.docker_image }} \
          sh -c 'if [ -n "$merged_ramdisk_url" ]; then jq ".artifacts.ramdisk = env.merged_ramdisk_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi'

    - name: Update firmware and ramdisk
      shell: bash
      run: |
        set -euo pipefail
        cd ../job_render

        # Fall back to the stable kerneltest ramdisk only if the merged ramdisk is not available
        if [ -z "${{ steps.process_urls.outputs.merged_ramdisk_url }}" ]; then
          echo "Merged ramdisk not found. Using stable kerneltest ramdisk fallback."
          ramdisk_url="$(aws s3 presign s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
          docker run -i --rm \
            --user "$(id -u):$(id -g)" \
            --workdir="$PWD" \
            -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
            -e ramdisk_url="$ramdisk_url" \
            ${{ inputs.docker_image }} \
            jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
        else
          echo "Ramdisk set from merged source; skipping kerneltest fallback."
        fi

        # Optional board-specific firmware initramfs
        if [ -n "${{ env.FIRMWARE }}" ]; then
          case "${{ env.FIRMWARE }}" in
            sm8750-mtp)
              FW_FILE="initramfs-firmware-dragonboard410c-image-sm8750-mtp.cpio.gz"
              ;;
            *)
              FW_FILE="initramfs-firmware-${{ env.FIRMWARE }}-image-qcom-armv8a.cpio.gz"
              ;;
          esac

          echo "Using firmware file: $FW_FILE"

          firmware_url="$(aws s3 presign s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/${FW_FILE} --expires-in 7600)"

          docker run -i --rm \
            --user "$(id -u):$(id -g)" \
            --workdir="$PWD" \
            -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
            -e firmware_url="$firmware_url" \
            ${{ inputs.docker_image }} \
            jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
        else
          echo "No FIRMWARE provided; skipping firmware artifact update."
        fi

    - name: Create lava_job_definition
      shell: bash
      run: |
        cd ../job_render
        mkdir -p renders
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e TARGET="${{ env.LAVA_NAME }}" \
          -e TARGET_DTB="${{ env.MACHINE }}" \
          ${{ inputs.docker_image }} \
          sh -c 'export BOOT_METHOD=fastboot && \
            export TARGET=${TARGET} && \
            export TARGET_DTB=${TARGET_DTB} && \
            python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --video_pre-merge'
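
For context, the presigned_urls.json manifest that the process_urls step parses is the one written by the aws_s3_helper multi-upload mode: a flat JSON object mapping each uploaded filename to its pre-signed URL. A sketch of its shape, with placeholder URLs and <MACHINE> standing in for the board name:

{
  "Image": "https://qli-prd-video-gh-artifacts.s3.amazonaws.com/...",
  "vmlinux": "https://qli-prd-video-gh-artifacts.s3.amazonaws.com/...",
  "modules.tar.xz": "https://qli-prd-video-gh-artifacts.s3.amazonaws.com/...",
  "video-merged.cpio.gz": "https://qli-prd-video-gh-artifacts.s3.amazonaws.com/...",
  "<MACHINE>.dtb": "https://qli-prd-video-gh-artifacts.s3.amazonaws.com/..."
}

After the jq updates above run, the artifacts object of data/cloudData.json should carry entries along these lines (shape inferred from the jq filters; other fields of the template are omitted here):

{
  "artifacts": {
    "metadata": "<pre-signed metadata.json URL>",
    "kernel": "<pre-signed Image URL>",
    "vmlinux": "<pre-signed vmlinux URL>",
    "modules": "<pre-signed modules.tar.xz URL>",
    "ramdisk": "<pre-signed ramdisk URL>",
    "firmware": "<pre-signed firmware initramfs URL>"
  }
}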
