# paper_2025/python/save_inv_data_thresh-lp.py
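"""Amplitude sweep through the threshold + low-pass feature stage.

For each processed recording of the target species, the precomputed kernel
responses are rescaled over a range of amplitudes (optionally with added
unit-variance noise), passed through a threshold and a low-pass filter, and
per-scale intensity measures plus example traces at selected scales are
saved under save_path.
"""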
import glob
import numpy as np
import matplotlib.pyplot as plt
from thunderhopper.modeltools import load_data, save_data
from thunderhopper.filetools import crop_paths
from thunderhopper.filters import sosfilter
from IPython import embed

# GENERAL SETTINGS:
target = 'Omocestus_rufipes'
data_paths = glob.glob(f'../data/processed/{target}*.npz')
save_path = '../data/inv/thresh_lp/'

# ANALYSIS SETTINGS:
add_noise = False
thresh_percent = 90
example_scales = np.array([0, 0.5, 1, 10, 50])
scales = np.geomspace(0.01, 50, 100)
scales = np.unique(np.concatenate((scales, example_scales)))
plot_results = True
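
# NOTE: Runs with add_noise=False reload the detection threshold from the
# '_noise' file written by a previous add_noise=True run, so the noise
# condition has to be run first.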

# EXECUTION:
for data_path, name in zip(data_paths, crop_paths(data_paths)):
    print(f'Processing {name}')
    save_name = save_path + name

    # Get pure-song kernel responses:
    data, config = load_data(data_path, files='conv')
    song, rate = data['conv'], data['conv_rate']

    # Get song segment to be analyzed:
    time = np.arange(song.shape[0]) / rate
    start, end = data['songs_0'].ravel()
    segment = (time >= start) & (time <= end)

    # Normalize song component:
    song /= song[segment, :].std(axis=0)

    if add_noise:
        # Get normalized noise:
        rng = np.random.default_rng()
        noise = rng.normal(size=(song.shape[0], 1))
        noise /= noise[segment].std()
        # Prepare noise-bound threshold:
        threshold = np.percentile(noise, thresh_percent, axis=0)
    else:
        # Reuse threshold from previous noise run:
        threshold = np.load(save_name + '_noise.npz')['thresh']
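    # Note: the threshold is the thresh_percent-th percentile of the
    # unit-variance noise, so with noise added the song-free (scale 0)
    # condition crosses it roughly (100 - thresh_percent)% of the time.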

    # Prepare snippet storage:
    shape = song.shape + (example_scales.size,)
    conv = np.zeros(shape, dtype=float)
    bi = np.zeros(shape, dtype=float)
    feat = np.zeros(shape, dtype=float)

    # Prepare measure storage:
    shape = (scales.size, song.shape[1])
    measure_conv = np.zeros(shape, dtype=float)
    measure_feat = np.zeros(shape, dtype=float)
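    # conv/bi/feat hold full traces at the example scales only
    # (samples x kernels x example scales); the measure arrays hold one
    # summary value per scale and kernel.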

    # Execute piecewise:
    for i, scale in enumerate(scales):
        print('Simulating scale ', scale)

        # Rescale song component:
        scaled_conv = song * scale
        if add_noise:
            # Add noise:
            scaled_conv += noise

        # Process mixture:
        scaled_bi = (scaled_conv > threshold).astype(float)
        scaled_feat = sosfilter(scaled_bi, rate, config['feat_fcut'], 'lp',
                                padtype='fixed', padlen=config['padlen'])
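        # Cutoff ('feat_fcut') and padding length ('padlen') of the feature
        # low-pass come from the model config loaded with the data.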

        # Log snippet data:
        if scale in example_scales:
            scale_ind = np.nonzero(example_scales == scale)[0][0]
            conv[:, :, scale_ind] = scaled_conv
            bi[:, :, scale_ind] = scaled_bi
            feat[:, :, scale_ind] = scaled_feat

        # Get "intensity measure" per stage:
        measure_conv[i] = scaled_conv[segment, :].std(axis=0)
        measure_feat[i] = scaled_feat[segment, :].mean(axis=0)
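        # Without noise, measure_conv grows linearly with scale (the song was
        # normalized to unit variance); measure_feat is roughly the fraction
        # of segment time spent above threshold.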

    # # Relate to smallest scale:
    # base_ind = np.argmin(scales)
    # measure_conv /= measure_conv[base_ind, :]

    if plot_results:
        fig, (ax1, ax2) = plt.subplots(2, 1)
        ax1.plot(scales, measure_conv)
        ax1.plot(scales, measure_conv.mean(axis=1), c='k')
        ax1.plot(scales, np.median(measure_conv, axis=1), c='k', ls='--')
        ax2.plot(scales, measure_feat)
        ax2.plot(scales, np.nanmean(measure_feat, axis=1), c='k')
        ax2.plot(scales, np.nanmedian(measure_feat, axis=1), c='k', ls='--')
        plt.show()

    # Condense measures across kernels:
    spread_conv = np.zeros((2, scales.size))
    spread_conv[0] = np.nanpercentile(measure_conv, 25, axis=1)
    spread_conv[1] = np.nanpercentile(measure_conv, 75, axis=1)
    measure_conv = np.nanmedian(measure_conv, axis=1)
    spread_feat = np.zeros((2, scales.size))
    spread_feat[0] = np.nanpercentile(measure_feat, 25, axis=1)
    spread_feat[1] = np.nanpercentile(measure_feat, 75, axis=1)
    measure_feat = np.nanmedian(measure_feat, axis=1)
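    # spread_* holds the 25th/75th percentiles across kernels, so the saved
    # curves can be drawn as a median line with an interquartile band.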

    # Save analysis results:
    if save_path is not None:
        data = dict(
            scales=scales,
            example_scales=example_scales,
            conv=conv,
            bi=bi,
            feat=feat,
            measure_conv=measure_conv,
            spread_conv=spread_conv,
            measure_feat=measure_feat,
            spread_feat=spread_feat,
            thresh=threshold,
            thresh_perc=thresh_percent,
        )
        if add_noise:
            save_name += '_noise'
        save_data(save_name, data, config, overwrite=True)

print('Done.')
embed()