Skip to content

Commit 4681549

Browse files
authored
Merge pull request #20 from uwescience/model_versioning
adding model benchmarking workflow
2 parents 49ce33e + 5ad32ec commit 4681549

14 files changed

+358
-0
lines changed
Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
---
# Builds the benchmark website (a Jupyter notebook rendered to HTML)
# and deploys it to GitHub Pages.
name: Create Benchmark Website

on:
  # Runs on pushes targeting the default branch
  push:
    branches: ["main", "model_versioning"]
  pull_request:
    types: ["opened", "reopened", "synchronize", "edited"]
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages.
# NOTE: once a permissions block is declared, every unlisted scope becomes
# "none" — `contents: read` is required for actions/checkout to work.
permissions:
  contents: read
  pages: write
  id-token: write

jobs:
  # Build the website from the benchmarks notebook.
  build:
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash -l {0}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Install Python dependencies
        run: |
          pip install "dask[dataframe]"
          pip install ipykernel
          pip install nbconvert

      - name: Build Website
        shell: bash -el {0}
        run: |
          cd ambient_sound_analysis
          jupyter nbconvert display_benchmarks.ipynb --execute --to html --output-dir=_build/html --no-input

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: ambient_sound_analysis/_build/html

  # Publish the website to GitHub Pages if the build succeeded.
  deploy:
    needs: build
    # if: github.ref == 'refs/heads/model_versioning'
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}

    steps:
      - name: Setup Pages
        uses: actions/configure-pages@v5

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
---
# Runs the model benchmarking script on pull requests and commits the
# resulting score CSV (annotated with PR author, head SHA, and title)
# back to the repository.
name: Model Benchmarking

on:
  pull_request:
    types: [opened, synchronize, edited, reopened]

env:
  # Must be a workflow expression: without ${{ }} the env var was set to
  # the literal text "github.event.pull_request.user.login".
  pr_username: ${{ github.event.pull_request.user.login }}

# schedule:
#   - cron: '9 * * * *'

jobs:
  process:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.9.18"
          cache: "pip"
          cache-dependency-path: ambient_sound_analysis/requirements_noise.txt

      - name: Install Python dependencies
        run: |
          pip install -r ambient_sound_analysis/requirements_noise.txt

      # - name: Install ffmpeg
      #   run: |
      #     sudo apt-get update
      #     sudo apt-get install ffmpeg

      - uses: awalsh128/cache-apt-pkgs-action@latest
        with:
          packages: ffmpeg

      - name: Create plots
        # NOTE(review): the previous guard
        # `if: steps.cache.outputs.cache-hit != 'true'` referenced a step id
        # "cache" that does not exist, so it always evaluated truthy;
        # removed as dead logic.
        run: |
          python ambient_sound_analysis/model_benchmarking.py

      - name: Append username
        run: |
          # sed 's/$/ test trigger/' 'ambient_sound_analysis/csv/2024-06-01 17:00:00.csv' > 'ambient_sound_analysis/csv/2024-06-01 17:00:00_new.csv'
          export output_file="score_${{ github.event.pull_request.head.sha }}.csv"
          sed 's/$/, ${{ github.event.pull_request.user.login }}, ${{ github.event.pull_request.head.sha }}, ${{ github.event.pull_request.title }}/' 'ambient_sound_analysis/csv/test.csv' > "ambient_sound_analysis/csv/$output_file"

      - name: Upload to GitHub repo
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: Commit to Github
          file_pattern: 'ambient_sound_analysis/csv/score*.csv'

      # - name: Upload as artifact
      #   uses: actions/upload-artifact@v4
      #   with:
      #     name: ambient_sound_plots
      #     path: ambient_sound_analysis/img/*.png

      # - name: Upload to Google Drive
      #   uses: AnimMouse/setup-rclone@v1
      #   with:
      #     rclone_config: ${{ secrets.RCLONE_CONFIG }}

      # - run: |
      #     rclone copy ambient_sound_analysis/img/broadband.png mydrive:rclone_uploads/
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, 1f3ac5bc8b3a4914606ceebcbd910a5aac64e50e, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, 28401087ddea6a1ac2829d708423b362b8d3840f, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, 42aa52eddaf7494cf272de17396907146982e1fd, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, 66b46ac47aa1c2bcbda573e147f3a16eeed61dcd, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, 71236d3146ee8b3354b3d8fea71d69028b83126b, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
1, valentina-s, 72b13caee35f09369b3d3c6b082e71bac68316c0, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, a3d0ab1feae8b9b91c923229c4cf3d5b06e05f98, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, cd12ce8cad03004c315e2c175fd61b3ebecdc3be, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, f38875f0d0e246c9035abb2f76602e1a89479171, adding model benchmarking workflow
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
17, valentina-s, fc9e91bb543073453639b8989614702fdeaaa1e4, adding model benchmarking workflow
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"id": "13bdddf3-f048-4bb2-801d-6a8473fa7d9d",
6+
"metadata": {},
7+
"source": [
8+
"# Latest Benchmarks"
9+
]
10+
},
11+
{
12+
"cell_type": "code",
13+
"execution_count": 68,
14+
"id": "9b8f9eff-5009-4d4e-9e17-a9f14960c073",
15+
"metadata": {},
16+
"outputs": [],
17+
"source": [
18+
"import dask.dataframe as ddf"
19+
]
20+
},
21+
{
22+
"cell_type": "code",
23+
"execution_count": 69,
24+
"id": "dad42a94-20aa-4976-8ab3-558dc11c67c7",
25+
"metadata": {},
26+
"outputs": [],
27+
"source": [
28+
"df = ddf.read_csv(\"csv/*.csv\", header=None, index_col=False)"
29+
]
30+
},
31+
{
32+
"cell_type": "code",
33+
"execution_count": 70,
34+
"id": "ae56d8d7-aea5-4a09-971d-a05afcfad2ac",
35+
"metadata": {},
36+
"outputs": [],
37+
"source": [
38+
"df = df.compute().reset_index(drop=True)"
39+
]
40+
},
41+
{
42+
"cell_type": "code",
43+
"execution_count": 71,
44+
"id": "f4354a33-83ca-4878-bb15-2ea116f32996",
45+
"metadata": {},
46+
"outputs": [],
47+
"source": [
48+
"df.columns = [\"# Ships\", \"username\", \"SHA\", \"title\"]"
49+
]
50+
},
51+
{
52+
"cell_type": "code",
53+
"execution_count": 72,
54+
"id": "a22b796f-9a02-4d3c-a3b4-335270e0e347",
55+
"metadata": {},
56+
"outputs": [
57+
{
58+
"data": {
59+
"text/html": [
60+
"<div>\n",
61+
"<style scoped>\n",
62+
" .dataframe tbody tr th:only-of-type {\n",
63+
" vertical-align: middle;\n",
64+
" }\n",
65+
"\n",
66+
" .dataframe tbody tr th {\n",
67+
" vertical-align: top;\n",
68+
" }\n",
69+
"\n",
70+
" .dataframe thead th {\n",
71+
" text-align: right;\n",
72+
" }\n",
73+
"</style>\n",
74+
"<table border=\"1\" class=\"dataframe\">\n",
75+
" <thead>\n",
76+
" <tr style=\"text-align: right;\">\n",
77+
" <th></th>\n",
78+
" <th># Ships</th>\n",
79+
" <th>username</th>\n",
80+
" <th>SHA</th>\n",
81+
" <th>title</th>\n",
82+
" </tr>\n",
83+
" </thead>\n",
84+
" <tbody>\n",
85+
" <tr>\n",
86+
" <th>0</th>\n",
87+
" <td>1</td>\n",
88+
" <td>valentina-s</td>\n",
89+
" <td>1dbb9106cd9ff69b73a7171f938285650a97d0bb</td>\n",
90+
" <td>$ {{github.event.pull_request.title}}</td>\n",
91+
" </tr>\n",
92+
" <tr>\n",
93+
" <th>1</th>\n",
94+
" <td>1</td>\n",
95+
" <td>valentina-s</td>\n",
96+
" <td>72b13caee35f09369b3d3c6b082e71bac68316c0</td>\n",
97+
" <td>adding model benchmarking workflow</td>\n",
98+
" </tr>\n",
99+
" </tbody>\n",
100+
"</table>\n",
101+
"</div>"
102+
],
103+
"text/plain": [
104+
" # Ships username SHA \\\n",
105+
"0 1 valentina-s 1dbb9106cd9ff69b73a7171f938285650a97d0bb \n",
106+
"1 1 valentina-s 72b13caee35f09369b3d3c6b082e71bac68316c0 \n",
107+
"\n",
108+
" title \n",
109+
"0 $ {{github.event.pull_request.title}} \n",
110+
"1 adding model benchmarking workflow "
111+
]
112+
},
113+
"execution_count": 72,
114+
"metadata": {},
115+
"output_type": "execute_result"
116+
}
117+
],
118+
"source": [
119+
"df"
120+
]
121+
}
122+
],
123+
"metadata": {
124+
"kernelspec": {
125+
"display_name": "Python 3 (ipykernel)",
126+
"language": "python",
127+
"name": "python3"
128+
},
129+
"language_info": {
130+
"codemirror_mode": {
131+
"name": "ipython",
132+
"version": 3
133+
},
134+
"file_extension": ".py",
135+
"mimetype": "text/x-python",
136+
"name": "python",
137+
"nbconvert_exporter": "python",
138+
"pygments_lexer": "ipython3",
139+
"version": "3.9.18"
140+
}
141+
},
142+
"nbformat": 4,
143+
"nbformat_minor": 5
144+
}
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
# importing general Python libraries
import pandas as pd
import datetime as dt
import os
import matplotlib.pyplot as plt
import pytz
import plotly.graph_objects as go
import numpy as np

# importing orcasound_noise libraries
from orcasound_noise.pipeline.pipeline import NoiseAnalysisPipeline
from orcasound_noise.utils import Hydrophone
from orcasound_noise.pipeline.acoustic_util import plot_spec, plot_bb


# Benchmark driver: pulls a fixed window of hydrophone noise data, counts
# ship passages from the broadband signal, and writes the count to a CSV.
if __name__ == '__main__':
    # NOTE(review): the original comment said "Port Townsend, 1 Hz Frequency";
    # the code actually uses the Orcasound Lab hydrophone with delta_f=10 —
    # confirm which location/resolution is intended.
    pipeline = NoiseAnalysisPipeline(Hydrophone.ORCASOUND_LAB,
                                     delta_f=10, bands=None,
                                     delta_t=60, mode='safe')

    # Generate parquet dataframes with noise levels for a time period.
    # now = dt.datetime.now(pytz.timezone('US/Pacific'))
    # fix time so benchmark runs are reproducible
    now = dt.datetime(2024, 6, 1, 17, 0, 0)
    psd_path, broadband_path = pipeline.generate_parquet_file(
        now - dt.timedelta(hours=13),
        now - dt.timedelta(hours=8),
        upload_to_s3=False)

    # Read the parquet files
    # psd_df = pd.read_parquet(psd_path)
    bb_df = pd.read_parquet(broadband_path)

    # Count ship passages as rising edges: samples where the broadband
    # level first crosses above the threshold.
    threshold = 5
    nof_ships = (np.diff((bb_df['0'] > threshold).astype('uint8')) == 1).sum()

    # Create a new directory if it does not exist
    if not os.path.exists('ambient_sound_analysis/csv'):
        os.makedirs('ambient_sound_analysis/csv')

    # pd.DataFrame([nof_ships]).to_csv('ambient_sound_analysis/csv/'+str(now)+'.csv', header=False, index=False)

    print("Nof Ships: " + str(nof_ships))
    pd.DataFrame([nof_ships]).to_csv('ambient_sound_analysis/csv/test.csv',
                                     header=False, index=False)

    # Create and save psd plot
    # fig = plot_spec(psd_df)
    # fig.write_image('ambient_sound_analysis/img/psd.png')

    # Create and save bb plot
    # fig = plot_bb(bb_df)
    # fig.savefig('ambient_sound_analysis/img/broadband.png')

0 commit comments

Comments
 (0)