Added multi-thresh simulation to "full" and "short" (currently running).

Added complete "rect-lp" analysis, except for the figure.
Added multiple appendix figs.
Overhauled normalization options across all condense scripts.

Co-authored-by: Copilot <copilot@github.com>
This commit is contained in:
j-hartling
2026-04-24 16:50:14 +02:00
parent 1a586848e8
commit 5411a309f7
48 changed files with 1549 additions and 300 deletions

View File

@@ -17,7 +17,7 @@ target_species = [
'Gomphocerippus_rufus',
'Omocestus_rufipes',
'Pseudochorthippus_parallelus',
][6]
][5]
example_file = {
'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms',
'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms',
@@ -29,7 +29,7 @@ example_file = {
}[target_species]
data_paths = search_files(target_species, dir='../data/processed/')
noise_path = '../data/processed/white_noise_sd-1.npz'
ref_path = '../data/inv/short/ref_measures.npz'
thresh_path = '../data/inv/short/thresholds.npz'
pre_stages = ['filt', 'env']
stages = pre_stages + ['inv', 'conv', 'feat']
save_path = '../data/inv/short/'
@@ -38,26 +38,17 @@ save_path = '../data/inv/short/'
example_scales = np.array([0.1, 1, 10, 30, 100, 300])
scales = np.geomspace(0.01, 10000, 500)
scales = np.unique(np.concatenate(([0], scales, example_scales)))
thresh_rel = 0.5
thresh_rel = np.array([0, 0.5, 1, 1.5, 2, 2.5, 3])
# SUBSET SETTINGS:
kernels = np.array([
[1, 0.002],
[-1, 0.002],
[2, 0.004],
[-2, 0.004],
[3, 0.032],
[-3, 0.032]
])
kernels = None
types = None#np.array([-1])
sigmas = None#np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032])
types = None
sigmas = None
# PREPARATION:
pure_noise = np.load(noise_path)['raw']
if thresh_rel is not None:
# Get threshold values from pure-noise response SD:
thresh_abs = np.load(ref_path)['conv'] * thresh_rel
thresh_data = dict(np.load(thresh_path))
thresh_abs = thresh_rel[:, None] * thresh_data['sds'][None, :]
# EXECUTION:
for data_path, name in zip(data_paths, crop_paths(data_paths)):
@@ -68,17 +59,13 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
data, config = load_data(data_path, files='raw')
song, rate = data['raw'], config['rate']
if thresh_rel is not None:
# Set kernel-specific thresholds:
config['feat_thresh'] = thresh_abs
# Reduce to kernel subset:
if any(var is not None for var in [kernels, types, sigmas]):
kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas)
config['kernels'] = config['kernels'][:, kern_inds]
config['k_specs'] = config['k_specs'][kern_inds, :]
config['k_props'] = [config['k_props'][i] for i in kern_inds]
config['feat_thresh'] = config['feat_thresh'][kern_inds]
thresh_abs = thresh_abs[:, kern_inds]
# Get song segment to be analyzed:
time = np.arange(song.shape[0]) / rate
@@ -100,7 +87,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
measure_env=np.zeros(shape_low, dtype=float),
measure_inv=np.zeros(shape_low, dtype=float),
measure_conv=np.zeros(shape_high, dtype=float),
measure_feat=np.zeros(shape_high, dtype=float)
measure_feat=np.zeros(shape_high + (thresh_rel.size,), dtype=float)
)
if save_detailed:
# Prepare optional storage:
@@ -111,7 +98,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
snip_env=np.zeros(shape_low, dtype=float),
snip_inv=np.zeros(shape_low, dtype=float),
snip_conv=np.zeros(shape_high, dtype=float),
snip_feat=np.zeros(shape_high, dtype=float)
snip_feat=np.zeros(shape_high + (thresh_rel.size,), dtype=float)
)
# Execute piecewise:
@@ -129,29 +116,38 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
signals['inv'] = sosfilter(signals['env'], rate, config['inv_fcut'], 'hp',
padtype='constant', padlen=config['padlen'])
signals['conv'] = convolve_kernels(signals['inv'], config['kernels'], config['k_specs'])
signals['feat'] = sosfilter((signals['conv'] > config['feat_thresh']).astype(float),
rate, config['feat_fcut'], 'lp',
padtype='fixed', padlen=config['padlen'])
# Store results:
for stage in stages:
# Store non-feature results:
for stage in stages[:-1]:
# Log intensity measures:
mkey = f'measure_{stage}'
if stage == 'feat':
measures[mkey][i] = signals[stage][segment, :].mean(axis=0)
else:
measures[mkey][i] = signals[stage][segment, ...].std(axis=0)
measures[f'measure_{stage}'][i] = signals[stage][segment, ...].std(axis=0)
# Log optional snippet data:
if save_detailed and scale in example_scales:
scale_ind = np.nonzero(example_scales == scale)[0][0]
snippets[f'snip_{stage}'][:, ..., scale_ind] = signals[stage]
# Execute piecewise again:
for j, thresholds in enumerate(thresh_abs):
# Finalize processing:
feat = sosfilter((signals['conv'] > thresholds).astype(float),
rate, config['feat_fcut'], 'lp',
padtype='fixed', padlen=config['padlen'])
# Log intensity measure:
measures['measure_feat'][i, :, j] = feat[segment, :].mean(axis=0)
# Log optional snippet data:
if save_detailed and scale in example_scales:
snippets['snip_feat'][:, :, scale_ind, j] = feat
# Save analysis results:
if save_path is not None:
data = dict(
scales=scales,
example_scales=example_scales,
thresh_rel=thresh_rel,
thresh_abs=thresh_abs,
)
data.update(measures)
if save_detailed: