# casino/.gitea/workflows/benchmark.yml
name: Performance Benchmarking

on:
  schedule:
    - cron: '0 0 * * 1' # Run weekly on Monday at midnight
  workflow_dispatch: # Allow manual triggering
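# '0 0 * * 1' reads minute / hour / day-of-month / month / day-of-week, i.e.
# every Monday at 00:00 UTC. workflow_dispatch additionally lets a maintainer
# kick off a run on demand when a change needs to be measured right away.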
jobs:
  backend-benchmark:
    name: "Backend Performance Benchmark"
    # ubuntu-latest is an assumption; it must match a label registered with
    # the act_runner that executes this workflow.
    runs-on: ubuntu-latest
    container:
      image: "cimg/openjdk:23.0-node"
    steps:
      - name: "Checkout"
        uses: actions/checkout@v4
      - name: "Setup Gradle"
        working-directory: ./backend
        run: chmod +x ./gradlew
      - name: "Cache Gradle dependencies"
        uses: actions/cache@v4
        with:
          path: |
            ~/.gradle/caches
            ~/.gradle/wrapper
            backend/.gradle
          key: gradle-${{ runner.os }}-${{ hashFiles('backend/build.gradle.kts', 'backend/gradle/wrapper/gradle-wrapper.properties') }}
          restore-keys: |
            gradle-${{ runner.os }}-
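      # The key above changes whenever the build script or wrapper pin
      # changes; restore-keys then falls back to the newest cache with the
      # gradle-<os>- prefix, so dependencies stay mostly warm even right
      # after a build.gradle.kts edit.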
- name: "Set up JMH"
working-directory: ./backend
run: |
cat <<EOT >> build.gradle.kts
plugins {
id("me.champeau.jmh") version "0.7.2"
}
jmh {
iterations.set(5)
fork.set(1)
warmupIterations.set(3)
benchmarkMode.set(listOf("thrpt"))
resultFormat.set("JSON")
resultsFile.set(file("\$buildDir/reports/jmh/results.json"))
}
EOT
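      # The jmh task only measures benchmark classes it can find; the
      # me.champeau.jmh plugin picks them up from src/jmh/java by default.
      # A minimal sketch of such a class -- package, class name, and the
      # measured workload are hypothetical, not something this repository is
      # known to contain:
      #
      #   package casino.bench;
      #
      #   import java.util.concurrent.TimeUnit;
      #   import org.openjdk.jmh.annotations.*;
      #
      #   @State(Scope.Benchmark)
      #   @OutputTimeUnit(TimeUnit.SECONDS)
      #   public class PayoutBenchmark {
      #       @Benchmark
      #       public double spinPayout() {
      #           // Exercise the hot path you want tracked week over week.
      #           return Math.pow(1.0001, 42); // placeholder workload
      #       }
      #   }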
- name: "Run JMH Benchmarks"
working-directory: ./backend
run: ./gradlew jmh
- name: "Upload JMH Results"
uses: actions/upload-artifact@v4
with:
name: jmh-benchmark-results
path: backend/build/reports/jmh/
retention-days: 90
- name: "Analyze Performance"
working-directory: ./backend
run: |
echo "### Backend Performance Benchmark Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Full results have been uploaded as an artifact. Summary below:" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Parse JMH results and display top 5 slowest operations
echo "#### Top 5 Slowest Operations" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Benchmark | Mode | Score | Units |" >> $GITHUB_STEP_SUMMARY
echo "| --- | --- | --- | --- |" >> $GITHUB_STEP_SUMMARY
if [ -f build/reports/jmh/results.json ]; then
jq -r '.[] | "\(.benchmark) | \(.mode) | \(.primaryMetric.score) | \(.primaryMetric.scoreUnit)"' build/reports/jmh/results.json |
sort -t '|' -k3 -n |
head -5 >> $GITHUB_STEP_SUMMARY
else
echo "No benchmark results found" >> $GITHUB_STEP_SUMMARY
fi
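      # For reference, each element of the JMH results.json array has roughly
      # this shape (trimmed to the fields the jq filter above reads; the
      # benchmark name mirrors the hypothetical class sketched earlier):
      #
      #   {
      #     "benchmark": "casino.bench.PayoutBenchmark.spinPayout",
      #     "mode": "thrpt",
      #     "primaryMetric": { "score": 1234567.89, "scoreUnit": "ops/s" }
      #   }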

  frontend-benchmark:
    name: "Frontend Performance Benchmark"
    # As above, the runner label is an assumption for this act_runner setup.
    runs-on: ubuntu-latest
    container:
      image: catthehacker/ubuntu:act-latest
    steps:
      - name: "Checkout"
        uses: actions/checkout@v4
      - name: "Install Bun"
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest
- name: "Cache Dependencies"
uses: actions/cache@v4
with:
path: frontend/node_modules
key: ${{ runner.os }}-bun-${{ hashFiles('frontend/bun.lock') }}
restore-keys: |
${{ runner.os }}-bun-
- name: "Install dependencies"
working-directory: ./frontend
run: bun install --frozen-lockfile
- name: "Set up Lighthouse CI"
working-directory: ./frontend
run: |
bun add -D @lhci/cli puppeteer
# Create Lighthouse CI config
cat <<EOT > lighthouserc.js
module.exports = {
ci: {
collect: {
startServerCommand: 'bun run build && bun run serve:dist',
url: ['http://localhost:8080/'],
numberOfRuns: 3,
},
upload: {
target: 'filesystem',
outputDir: './.lighthouse',
},
assert: {
preset: 'lighthouse:recommended',
assertions: {
'categories:performance': ['error', {minScore: 0.8}],
'categories:accessibility': ['error', {minScore: 0.9}],
'categories:best-practices': ['error', {minScore: 0.9}],
'categories:seo': ['error', {minScore: 0.9}],
}
},
},
};
EOT
# Add serve command for Lighthouse
jq '.scripts += {"serve:dist": "bunx serve -s dist"}' package.json > package.json.new
mv package.json.new package.json
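      # lhci autorun below chains collect, assert, and upload: it builds and
      # serves the app, audits the URL numberOfRuns times, checks the
      # assertions above, and writes reports into ./.lighthouse. The
      # `|| true` keeps the job green when an assertion fails so the reports
      # are still uploaded; drop it once scores are stable enough to enforce.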
- name: "Run Lighthouse CI"
working-directory: ./frontend
run: bunx lhci autorun || true
- name: "Upload Lighthouse Results"
uses: actions/upload-artifact@v4
with:
name: lighthouse-results
path: frontend/.lighthouse/
retention-days: 90
- name: "Analyze Frontend Performance"
working-directory: ./frontend
run: |
echo "### Frontend Performance Benchmark Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Full results have been uploaded as an artifact. Summary below:" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Extract scores from the JSON report
if [ -d .lighthouse ]; then
REPORT=$(find .lighthouse -name "*.json" | grep -v "manifest" | head -1)
if [ -n "$REPORT" ]; then
PERF_SCORE=$(jq '.categories.performance.score * 100' $REPORT)
ACCESS_SCORE=$(jq '.categories.accessibility.score * 100' $REPORT)
BP_SCORE=$(jq '.categories["best-practices"].score * 100' $REPORT)
SEO_SCORE=$(jq '.categories.seo.score * 100' $REPORT)
echo "#### Lighthouse Scores" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Category | Score |" >> $GITHUB_STEP_SUMMARY
echo "| --- | --- |" >> $GITHUB_STEP_SUMMARY
echo "| Performance | $PERF_SCORE% |" >> $GITHUB_STEP_SUMMARY
echo "| Accessibility | $ACCESS_SCORE% |" >> $GITHUB_STEP_SUMMARY
echo "| Best Practices | $BP_SCORE% |" >> $GITHUB_STEP_SUMMARY
echo "| SEO | $SEO_SCORE% |" >> $GITHUB_STEP_SUMMARY
# Extract opportunities for improvement
echo "" >> $GITHUB_STEP_SUMMARY
echo "#### Top Improvement Opportunities" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
jq -r '.audits | to_entries[] | select(.value.score != null and .value.score < 1 and .value.details.type == "opportunity") | "\(.value.title) | \(.value.displayValue)"' $REPORT |
head -5 |
awk '{print "- " $0}' >> $GITHUB_STEP_SUMMARY
else
echo "No Lighthouse reports found" >> $GITHUB_STEP_SUMMARY
fi
else
echo "No Lighthouse results directory found" >> $GITHUB_STEP_SUMMARY
fi
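      # For reference, the fields read above sit at the top level of each
      # Lighthouse JSON report, roughly as follows (scores are 0-1 fractions,
      # hence the * 100):
      #
      #   {
      #     "categories": {
      #       "performance":    { "score": 0.87 },
      #       "accessibility":  { "score": 0.95 },
      #       "best-practices": { "score": 0.92 },
      #       "seo":            { "score": 1 }
      #     },
      #     "audits": { /* one entry per audit: score, title, displayValue */ }
      #   }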

  performance-report:
    name: "Performance Report"
    # As above, the runner label is an assumption for this act_runner setup.
    runs-on: ubuntu-latest
    needs: [backend-benchmark, frontend-benchmark]
    if: always()
    steps:
      - name: "Checkout"
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # full history so git describe can walk the tags
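      # If this job should inspect the raw numbers rather than only link to
      # them, the artifacts uploaded by the jobs above could be pulled in
      # here. A minimal sketch, commented out because the summary below does
      # not depend on it:
      #
      # - name: "Download benchmark artifacts"
      #   uses: actions/download-artifact@v4
      #   with:
      #     path: ./benchmark-artifacts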
- name: "Generate Historical Comparison"
run: |
echo "# 📊 Performance Benchmarking Report" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Get previous tag for comparison
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "None")
PREV_TAG=$(git describe --tags --abbrev=0 "$LATEST_TAG"^1 2>/dev/null || echo "None")
echo "### Benchmark Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Current benchmarks run against: $LATEST_TAG" >> $GITHUB_STEP_SUMMARY
echo "Previous benchmarks compared to: $PREV_TAG" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Performance reports have been uploaded as artifacts. Review them for detailed metrics." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Check status of benchmark jobs
BACKEND_STATUS="${{ needs.backend-benchmark.result }}"
FRONTEND_STATUS="${{ needs.frontend-benchmark.result }}"
echo "| Component | Status |" >> $GITHUB_STEP_SUMMARY
echo "| --- | --- |" >> $GITHUB_STEP_SUMMARY
if [ "$BACKEND_STATUS" == "success" ]; then
echo "| Backend Benchmark | ✅ Complete |" >> $GITHUB_STEP_SUMMARY
else
echo "| Backend Benchmark | ⚠️ Issues occurred |" >> $GITHUB_STEP_SUMMARY
fi
if [ "$FRONTEND_STATUS" == "success" ]; then
echo "| Frontend Benchmark | ✅ Complete |" >> $GITHUB_STEP_SUMMARY
else
echo "| Frontend Benchmark | ⚠️ Issues occurred |" >> $GITHUB_STEP_SUMMARY
fi