Again, numerous changes.
Mostly figure polishing and fixing. Crucial fix to "short" invariance analysis.
This commit is contained in:
45
python/collect_inv_data_field.py
Normal file
45
python/collect_inv_data_field.py
Normal file
@@ -0,0 +1,45 @@
|
||||
import numpy as np
|
||||
from thunderhopper.filetools import search_files
|
||||
from thunderhopper.modeltools import load_data, save_data
|
||||
from IPython import embed
|
||||
|
||||
# GENERAL SETTINGS:
|
||||
target_species = ['Pseudochorthippus_parallelus']
|
||||
mode = ['song', 'noise'][1]
|
||||
stages = ['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat']
|
||||
search_path = f'../data/inv/field/{mode}/'
|
||||
save_path = f'../data/inv/field/{mode}/collected/'
|
||||
|
||||
# EXECUTION:
|
||||
for i, species in enumerate(target_species):
|
||||
print(f'Processing {species}')
|
||||
|
||||
# Fetch all species-specific song files:
|
||||
all_paths = search_files(species, ext='npz', dir=search_path)
|
||||
if not all_paths:
|
||||
continue
|
||||
|
||||
# Run through files:
|
||||
for j, path in enumerate(all_paths):
|
||||
|
||||
# Load invariance data:
|
||||
data, config = load_data(path, 'distances', 'measure')
|
||||
|
||||
if j == 0:
|
||||
# Prepare species-specific storage:
|
||||
species_data = dict(scales=data['distances'])
|
||||
for stage in stages:
|
||||
mkey = f'measure_{stage}'
|
||||
shape = data[mkey].shape + (len(all_paths),)
|
||||
species_data[mkey] = np.zeros(shape, dtype=float)
|
||||
|
||||
# Log species data:
|
||||
for stage in stages:
|
||||
mkey = f'measure_{stage}'
|
||||
species_data[mkey][..., j] = data[mkey]
|
||||
|
||||
# Save collected file data:
|
||||
save_name = save_path + species
|
||||
save_data(save_name, species_data, config, overwrite=True)
|
||||
|
||||
print('Done.')
|
||||
123
python/condense_inv_data_field.py
Normal file
123
python/condense_inv_data_field.py
Normal file
@@ -0,0 +1,123 @@
|
||||
import numpy as np
|
||||
from thunderhopper.filetools import search_files, crop_paths
|
||||
from thunderhopper.modeltools import load_data, save_data
|
||||
from IPython import embed
|
||||
|
||||
def sort_files_by_rec(paths, sources=['JJ', 'SLO']):
|
||||
# Separate by source:
|
||||
sorted_paths = {}
|
||||
for source in sources:
|
||||
|
||||
# Check for any source-specific song files:
|
||||
source_paths = [path for path in paths if source in path]
|
||||
if not source_paths:
|
||||
continue
|
||||
|
||||
# Separate by recording:
|
||||
sorted_paths[source] = {}
|
||||
for path, name in zip(source_paths, crop_paths(source_paths)):
|
||||
|
||||
# Find global time stamp behind source tag:
|
||||
ind = name.find(source) + len(source) + 1
|
||||
time_stamps = name[ind:].split('_')[-1]
|
||||
global_time = '-'.join(time_stamps.split('-')[:2])
|
||||
|
||||
if global_time in sorted_paths[source]:
|
||||
# Found existing time stamp (known recording):
|
||||
sorted_paths[source][global_time].append(path)
|
||||
else:
|
||||
# Found new time stamp (novel recording):
|
||||
sorted_paths[source][global_time] = [path]
|
||||
|
||||
# Re-sort song files by recording only (discarding source separation):
|
||||
flat_sorted = []
|
||||
for source_paths in sorted_paths.values():
|
||||
for rec_paths in source_paths.values():
|
||||
flat_sorted.append(rec_paths)
|
||||
return flat_sorted
|
||||
|
||||
|
||||
# GENERAL SETTINGS:
|
||||
target_species = ['Pseudochorthippus_parallelus']
|
||||
mode = ['song', 'noise'][0]
|
||||
stages = ['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat']
|
||||
search_path = f'../data/inv/field/{mode}/'
|
||||
save_path = f'../data/inv/field/{mode}/condensed/'
|
||||
sources = [
|
||||
'JJ',
|
||||
'SLO',
|
||||
]
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
normalization = 'none'
|
||||
if mode == 'song':
|
||||
normalization = [
|
||||
'none',
|
||||
# 'base',
|
||||
'range'
|
||||
][-1]
|
||||
|
||||
# EXECUTION:
|
||||
for i, species in enumerate(target_species):
|
||||
print(f'Processing {species}')
|
||||
|
||||
# Fetch all species-specific song files:
|
||||
all_paths = search_files(species, ext='npz', dir=search_path)
|
||||
if not all_paths:
|
||||
continue
|
||||
|
||||
# Sort song files by recording (one or more per source):
|
||||
sorted_paths = sort_files_by_rec(all_paths, sources)
|
||||
|
||||
# Condense across song files per recording:
|
||||
for j, rec_paths in enumerate(sorted_paths):
|
||||
for k, path in enumerate(rec_paths):
|
||||
|
||||
# Load invariance data:
|
||||
data, config = load_data(path, 'distances', 'measure')
|
||||
|
||||
if k == 0:
|
||||
# Prepare song file-specific storage:
|
||||
file_data = {}
|
||||
for stage in stages:
|
||||
shape = data[f'measure_{stage}'].shape + (len(rec_paths),)
|
||||
file_data[stage] = np.zeros(shape, dtype=float)
|
||||
if j == 0:
|
||||
# Prepare recording-specific storage:
|
||||
rec_mean, rec_sd = {}, {}
|
||||
for stage in stages:
|
||||
shape = data[f'measure_{stage}'].shape + (len(sorted_paths),)
|
||||
rec_mean[f'mean_{stage}'] = np.zeros(shape, dtype=float)
|
||||
rec_sd[f'sd_{stage}'] = np.zeros(shape, dtype=float)
|
||||
|
||||
# Log song file data:
|
||||
for stage in stages:
|
||||
mkey = f'measure_{stage}'
|
||||
|
||||
if normalization == 'range':
|
||||
# Min-max normalization:
|
||||
min_measure = data[mkey].min(axis=0, keepdims=True)
|
||||
max_measure = data[mkey].max(axis=0, keepdims=True)
|
||||
data[mkey] = (data[mkey] - min_measure) / (max_measure - min_measure)
|
||||
|
||||
file_data[stage][..., k] = data[mkey]
|
||||
|
||||
# Get recording statistics:
|
||||
for stage in stages:
|
||||
rec_mean[f'mean_{stage}'][..., j] = np.nanmean(file_data[stage], axis=-1)
|
||||
rec_sd[f'sd_{stage}'][..., j] = np.nanstd(file_data[stage], axis=-1)
|
||||
|
||||
# Save condensed recording data:
|
||||
save_name = save_path + species
|
||||
if normalization == 'none':
|
||||
save_name += '_unnormed'
|
||||
elif normalization == 'base':
|
||||
save_name += '_norm-base'
|
||||
elif normalization == 'range':
|
||||
save_name += '_norm-range'
|
||||
archive = dict(distances=data['distances'])
|
||||
archive.update(rec_mean)
|
||||
archive.update(rec_sd)
|
||||
save_data(save_name, archive, config, overwrite=True)
|
||||
|
||||
print('Done.')
|
||||
@@ -26,7 +26,11 @@ search_path = '../data/inv/full/'
|
||||
save_path = '../data/inv/full/condensed/'
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
compute_ratios = False
|
||||
normalization = [
|
||||
'none',
|
||||
'base',
|
||||
'range'
|
||||
][2]
|
||||
|
||||
# EXECUTION:
|
||||
for i, species in enumerate(target_species):
|
||||
@@ -64,8 +68,16 @@ for i, species in enumerate(target_species):
|
||||
# Log song file data:
|
||||
for stage in stages:
|
||||
mkey = f'measure_{stage}'
|
||||
if compute_ratios:
|
||||
|
||||
if normalization == 'base':
|
||||
# Noise baseline normalization:
|
||||
data[mkey] /= data[mkey][0]
|
||||
elif normalization == 'range':
|
||||
# Min-max normalization:
|
||||
min_measure = data[mkey].min(axis=0, keepdims=True)
|
||||
max_measure = data[mkey].max(axis=0, keepdims=True)
|
||||
data[mkey] = (data[mkey] - min_measure) / (max_measure - min_measure)
|
||||
|
||||
file_data[stage][..., k] = data[mkey]
|
||||
|
||||
# Get recording statistics:
|
||||
@@ -75,10 +87,12 @@ for i, species in enumerate(target_species):
|
||||
|
||||
# Save condensed recording data:
|
||||
save_name = save_path + species
|
||||
if compute_ratios:
|
||||
save_name += '_normed'
|
||||
else:
|
||||
save_name += '_raw'
|
||||
if normalization == 'none':
|
||||
save_name += '_unnormed'
|
||||
elif normalization == 'base':
|
||||
save_name += '_norm-base'
|
||||
elif normalization == 'range':
|
||||
save_name += '_norm-range'
|
||||
archive = dict(scales=data['scales'])
|
||||
archive.update(rec_mean)
|
||||
archive.update(rec_sd)
|
||||
|
||||
@@ -26,7 +26,11 @@ search_path = '../data/inv/short/'
|
||||
save_path = '../data/inv/short/condensed/'
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
compute_ratios = False
|
||||
normalization = [
|
||||
'none',
|
||||
'base',
|
||||
'range'
|
||||
][1]
|
||||
|
||||
# EXECUTION:
|
||||
for i, species in enumerate(target_species):
|
||||
@@ -64,8 +68,16 @@ for i, species in enumerate(target_species):
|
||||
# Log song file data:
|
||||
for stage in stages:
|
||||
mkey = f'measure_{stage}'
|
||||
if compute_ratios:
|
||||
|
||||
if normalization == 'base':
|
||||
# Noise baseline normalization:
|
||||
data[mkey] /= data[mkey][0]
|
||||
elif normalization == 'range':
|
||||
# Min-max normalization:
|
||||
min_measure = data[mkey].min(axis=0, keepdims=True)
|
||||
max_measure = data[mkey].max(axis=0, keepdims=True)
|
||||
data[mkey] = (data[mkey] - min_measure) / (max_measure - min_measure)
|
||||
|
||||
file_data[stage][..., k] = data[mkey]
|
||||
|
||||
# Get recording statistics:
|
||||
@@ -75,10 +87,12 @@ for i, species in enumerate(target_species):
|
||||
|
||||
# Save condensed recording data:
|
||||
save_name = save_path + species
|
||||
if compute_ratios:
|
||||
save_name += '_normed'
|
||||
else:
|
||||
save_name += '_raw'
|
||||
if normalization == 'none':
|
||||
save_name += '_unnormed'
|
||||
elif normalization == 'base':
|
||||
save_name += '_norm-base'
|
||||
elif normalization == 'range':
|
||||
save_name += '_norm-range'
|
||||
archive = dict(scales=data['scales'])
|
||||
archive.update(rec_mean)
|
||||
archive.update(rec_sd)
|
||||
|
||||
@@ -26,8 +26,8 @@ search_path = '../data/inv/thresh_lp/'
|
||||
save_path = '../data/inv/thresh_lp/condensed/'
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
with_noise = True
|
||||
plot_overview = True
|
||||
with_noise = False
|
||||
plot_overview = False
|
||||
thresh_rel = np.array([0.5, 1, 3])
|
||||
|
||||
# PREPARATION:
|
||||
|
||||
@@ -4,10 +4,11 @@ import matplotlib.pyplot as plt
|
||||
from itertools import product
|
||||
from thunderhopper.filetools import search_files
|
||||
from thunderhopper.modeltools import load_data
|
||||
from thunderhopper.filtertools import find_kern_specs
|
||||
from misc_functions import get_saturation
|
||||
from color_functions import load_colors
|
||||
from plot_functions import hide_axis, ylimits, xlabel, ylabel, title_subplot,\
|
||||
plot_line, strip_zeros, time_bar,\
|
||||
plot_line, strip_zeros, time_bar, set_clip_box,\
|
||||
letter_subplot, letter_subplots
|
||||
from IPython import embed
|
||||
|
||||
@@ -28,10 +29,19 @@ def plot_curves(ax, scales, measures, fill_kwargs={}, **kwargs):
|
||||
ax.fill_between(scales, *spread_measure, **fill_kwargs)
|
||||
return median_measure
|
||||
|
||||
def show_saturation(ax, scales, measures, high=0.95, **kwargs):
|
||||
high_ind = get_saturation(measures, high=high)[1]
|
||||
return ax.plot(scales[high_ind], 0, transform=ax.get_xaxis_transform(),
|
||||
marker='o', ms=10, zorder=6, clip_on=False, **kwargs)
|
||||
def exclude_zero_scale(data, stages):
|
||||
inds = data['scales'] > 0
|
||||
data['scales'] = data['scales'][inds]
|
||||
for stage in stages:
|
||||
data[f'mean_{stage}'] = data[f'mean_{stage}'][inds, ...]
|
||||
return data
|
||||
|
||||
def reduce_kernel_set(data, inds, keyword, stages=['conv', 'feat']):
|
||||
for stage in stages:
|
||||
key = f'{keyword}_{stage}'
|
||||
data[key] = data[key][:, inds, ...]
|
||||
return data
|
||||
|
||||
|
||||
# GENERAL SETTINGS:
|
||||
target_species = [
|
||||
@@ -52,21 +62,34 @@ example_file = {
|
||||
'Omocestus_rufipes': 'Omocestus_rufipes_DJN_32-40s724ms-48s779ms',
|
||||
'Pseudochorthippus_parallelus': 'Pseudochorthippus_parallelus_GBC_88-6s678ms-9s32.3ms'
|
||||
}[target_species]
|
||||
raw_path = search_files(target_species, incl='raw', dir='../data/inv/full/condensed/')[0]
|
||||
norm_path = search_files(target_species, incl='norm', dir='../data/inv/full/condensed/')[0]
|
||||
snip_path = search_files(example_file, dir='../data/inv/full/')[0]
|
||||
trace_path = search_files(target_species, dir='../data/inv/full/collected/')[0]
|
||||
ref_path = '../data/inv/full/ref_measures.npz'
|
||||
save_path = '../figures/fig_invariance_full.pdf'
|
||||
stages = ['filt', 'env', 'log', 'inv', 'conv', 'feat']
|
||||
load_kwargs = dict(
|
||||
files=stages,
|
||||
keywords=['scales', 'snip', 'measure']
|
||||
)
|
||||
raw_path = search_files(target_species, incl='unnormed', dir='../data/inv/full/condensed/')[0]
|
||||
base_path = search_files(target_species, incl='base', dir='../data/inv/full/condensed/')[0]
|
||||
range_path = search_files(target_species, incl='range', dir='../data/inv/full/condensed/')[0]
|
||||
snip_path = search_files(example_file, dir='../data/inv/full/')[0]
|
||||
save_path = '../figures/fig_invariance_full.pdf'
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
exclude_zero = True
|
||||
|
||||
# SUBSET SETTINGS:
|
||||
types = np.array([1, -1, 2, -2, 3, -3, 4, -4])
|
||||
sigmas = np.array([0.004, 0.008, 0.016, 0.032])
|
||||
# types = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10]
|
||||
# sigmas = [0.001, 0.002, 0.004, 0.008, 0.016, 0.032]
|
||||
kernels = np.array([
|
||||
[1, 0.002],
|
||||
[-1, 0.002],
|
||||
[2, 0.004],
|
||||
[-2, 0.004],
|
||||
[3, 0.032],
|
||||
[-3, 0.032]
|
||||
])
|
||||
kernels = None
|
||||
|
||||
# GRAPH SETTINGS:
|
||||
fig_kwargs = dict(
|
||||
figsize=(32/2.54, 20/2.54),
|
||||
figsize=(32/2.54, 32/2.54),
|
||||
)
|
||||
super_grid_kwargs = dict(
|
||||
nrows=2,
|
||||
@@ -222,16 +245,25 @@ plateau_dot_kwargs = dict(
|
||||
|
||||
# EXECUTION:
|
||||
|
||||
# Load invariance data:
|
||||
raw_data, config = load_data(raw_path, files='scales', keywords='mean')
|
||||
norm_data, _ = load_data(norm_path, files='scales', keywords='mean')
|
||||
scales = raw_data['scales']
|
||||
# Load raw (unnormed) invariance data:
|
||||
data, config = load_data(raw_path, files='scales', keywords='mean')
|
||||
if exclude_zero:
|
||||
data = exclude_zero_scale(data, stages)
|
||||
scales = data['scales']
|
||||
|
||||
# Load snippet data:
|
||||
snip, _ = load_data(snip_path, files='example_scales', keywords='snip')
|
||||
t_full = np.arange(snip['snip_filt'].shape[0]) / config['rate']
|
||||
snip_scales = snip['example_scales']
|
||||
|
||||
# Optional kernel subset:
|
||||
reduce_kernels = False
|
||||
if any(var is not None for var in [kernels, types, sigmas]):
|
||||
kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas)
|
||||
data = reduce_kernel_set(data, kern_inds, keyword='mean')
|
||||
snip = reduce_kernel_set(snip, kern_inds, keyword='snip')
|
||||
reduce_kernels = True
|
||||
|
||||
# Adjust grid parameters:
|
||||
snip_grid_kwargs['ncols'] = snip_scales.size
|
||||
|
||||
@@ -270,43 +302,48 @@ for i in range(big_grid.ncols):
|
||||
ax.set_yscale('symlog', linthresh=0.01, linscale=0.1)
|
||||
xlabel(ax, xlabels['big'], transform=big_subfig, **xlab_big_kwargs)
|
||||
ylabel(ax, ylabels['big'][i], **ylab_big_kwargs)
|
||||
if i < (big_grid.ncols - 1):
|
||||
ax.set_ylim(scales[0], scales[-1])
|
||||
else:
|
||||
ax.set_ylim(0, 1)
|
||||
big_axes[i] = ax
|
||||
letter_subplots(big_axes, 'bc', **letter_big_kwargs)
|
||||
letter_subplots(big_axes, 'bcd', **letter_big_kwargs)
|
||||
|
||||
if False:
|
||||
if True:
|
||||
# Plot filtered snippets:
|
||||
plot_snippets(snip_axes[0, :], t_full, snip['snip_filt'],
|
||||
c=colors['filt'], lw=lw['filt'])
|
||||
c=colors['filt'], lw=lw['filt'])
|
||||
|
||||
# Plot envelope snippets:
|
||||
plot_snippets(snip_axes[1, :], t_full, snip['snip_env'],
|
||||
ymin=0, c=colors['env'], lw=lw['env'])
|
||||
ymin=0, c=colors['env'], lw=lw['env'])
|
||||
|
||||
# Plot logarithmic snippets:
|
||||
plot_snippets(snip_axes[2, :], t_full, snip['snip_log'],
|
||||
c=colors['log'], lw=lw['log'])
|
||||
c=colors['log'], lw=lw['log'])
|
||||
|
||||
# Plot invariant snippets:
|
||||
plot_snippets(snip_axes[3, :], t_full, snip['snip_inv'],
|
||||
c=colors['inv'], lw=lw['inv'])
|
||||
c=colors['inv'], lw=lw['inv'])
|
||||
|
||||
# Plot kernel response snippets:
|
||||
plot_snippets(snip_axes[4, :], t_full, snip['snip_conv'],
|
||||
c=colors['conv'], lw=lw['conv'])
|
||||
c=colors['conv'], lw=lw['conv'])
|
||||
|
||||
# Plot feature snippets:
|
||||
plot_snippets(snip_axes[5, :], t_full, snip['snip_feat'],
|
||||
ymin=0, ymax=1, c=colors['feat'], lw=lw['feat'])
|
||||
ymin=0, ymax=1, c=colors['feat'], lw=lw['feat'])
|
||||
del snip
|
||||
|
||||
# Plot analysis results:
|
||||
# Remember saturation points:
|
||||
crit_inds, crit_scales = {}, {}
|
||||
|
||||
# Unnormed measures:
|
||||
for stage in stages:
|
||||
# Get average unnormed measure across recordings:
|
||||
raw_measure = raw_data[f'mean_{stage}'].mean(axis=-1)
|
||||
|
||||
# Plot unmodified intensity measures:
|
||||
curve = plot_curves(big_axes[0], scales, raw_measure, c=colors[stage], lw=lw['big'],
|
||||
# Plot average intensity measure across recordings:
|
||||
curve = plot_curves(big_axes[0], scales, data[f'mean_{stage}'].mean(axis=-1),
|
||||
c=colors[stage], lw=lw['big'],
|
||||
fill_kwargs=dict(color=colors[stage], alpha=0.25))
|
||||
|
||||
# Indicate saturation point:
|
||||
if stage in ['log', 'inv', 'conv', 'feat']:
|
||||
ind = get_saturation(curve, **plateau_settings)[1]
|
||||
@@ -317,43 +354,60 @@ for stage in stages:
|
||||
transform=big_axes[0].get_xaxis_transform())
|
||||
big_axes[0].vlines(scale, big_axes[0].get_ylim()[0], curve[ind],
|
||||
color=colors[stage], **plateau_line_kwargs)
|
||||
# Log saturation point:
|
||||
crit_inds[stage] = ind
|
||||
crit_scales[stage] = scale
|
||||
del data
|
||||
|
||||
# Get average noise-related measure across recordings:
|
||||
norm_measure = norm_data[f'mean_{stage}'].mean(axis=-1)
|
||||
|
||||
# Plot noise-related intensity measure:
|
||||
curve = plot_curves(big_axes[1], scales, norm_measure, c=colors[stage], lw=lw['big'],
|
||||
# Noise baseline-related measures:
|
||||
data, _ = load_data(base_path, files='scales', keywords='mean')
|
||||
if exclude_zero:
|
||||
data = exclude_zero_scale(data, stages)
|
||||
if reduce_kernels:
|
||||
data = reduce_kernel_set(data, kern_inds, keyword='mean')
|
||||
for stage in stages:
|
||||
# Plot average intensity measure across recordings:
|
||||
curve = plot_curves(big_axes[1], scales, data[f'mean_{stage}'].mean(axis=-1),
|
||||
c=colors[stage], lw=lw['big'],
|
||||
fill_kwargs=dict(color=colors[stage], alpha=0.25))
|
||||
|
||||
# Indicate saturation point:
|
||||
if stage in ['log', 'inv', 'conv', 'feat']:
|
||||
ind, scale = crit_inds[stage], crit_scales[stage]
|
||||
big_axes[1].plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=big_axes[1].get_xaxis_transform())
|
||||
big_axes[1].plot(scale, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=big_axes[1].get_xaxis_transform())
|
||||
big_axes[1].vlines(scale, big_axes[1].get_ylim()[0], curve[ind],
|
||||
color=colors[stage], **plateau_line_kwargs)
|
||||
del data
|
||||
|
||||
# Normalize measure to [0, 1]:
|
||||
min_measure = raw_measure.min(axis=0)
|
||||
max_measure = raw_measure.max(axis=0)
|
||||
norm_measure = (raw_measure - min_measure) / (max_measure - min_measure)
|
||||
|
||||
# Plot range-normalized intensity measure:
|
||||
curve = plot_curves(big_axes[2], scales, norm_measure, c=colors[stage], lw=lw['big'],
|
||||
# Min-max normalized measures:
|
||||
data, _ = load_data(range_path, files='scales', keywords='mean')
|
||||
if exclude_zero:
|
||||
data = exclude_zero_scale(data, stages)
|
||||
if reduce_kernels:
|
||||
data = reduce_kernel_set(data, kern_inds, keyword='mean')
|
||||
for stage in stages:
|
||||
# Plot average intensity measure across recordings:
|
||||
curve = plot_curves(big_axes[2], scales, data[f'mean_{stage}'].mean(axis=-1),
|
||||
c=colors[stage], lw=lw['big'],
|
||||
fill_kwargs=dict(color=colors[stage], alpha=0.25))
|
||||
|
||||
# Indicate saturation point:
|
||||
if stage in ['log', 'inv', 'conv', 'feat']:
|
||||
ind, scale = crit_inds[stage], crit_scales[stage]
|
||||
big_axes[2].plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=big_axes[2].get_xaxis_transform())
|
||||
big_axes[2].plot(scale, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=big_axes[2].get_xaxis_transform())
|
||||
big_axes[2].vlines(scale, big_axes[2].get_ylim()[0], curve[ind],
|
||||
color=colors[stage], **plateau_line_kwargs)
|
||||
del data
|
||||
|
||||
# Save graph:
|
||||
if save_path is not None:
|
||||
fig.savefig(save_path)
|
||||
file_name = save_path.replace('.pdf', f'_{target_species}.pdf')
|
||||
fig.savefig(file_name)
|
||||
plt.show()
|
||||
|
||||
print('Done.')
|
||||
|
||||
@@ -36,7 +36,7 @@ target_species = [
|
||||
'Chorthippus_biguttulus',
|
||||
'Chorthippus_mollis',
|
||||
'Chrysochraon_dispar',
|
||||
'Euchorthippus_declivus',
|
||||
# 'Euchorthippus_declivus',
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
@@ -137,7 +137,7 @@ ylabels = dict(
|
||||
env='$x_{\\text{env}}$',
|
||||
log='$x_{\\text{dB}}$',
|
||||
inv='$x_{\\text{adapt}}$',
|
||||
big='$\\sigma_{\\alpha}\\,/\\,\\sigma_{\\eta}$',
|
||||
big='$\\sigma_x\\,/\\,\\sigma_{\\eta}$',
|
||||
)
|
||||
xlab_big_kwargs = dict(
|
||||
y=0,
|
||||
@@ -354,11 +354,18 @@ big_axes = np.zeros((big_grid.ncols,), dtype=object)
|
||||
for i, scales in enumerate([pure_scales, noise_scales, noise_scales]):
|
||||
ax = big_subfig.add_subplot(big_grid[0, i])
|
||||
ax.set_xlim(scales[0], scales[-1])
|
||||
ax.set_ylim(scales[0], scales[-1])
|
||||
ax.set_xscale('symlog', linthresh=scales[1], linscale=0.5)
|
||||
ax.set_yscale('symlog', linthresh=scales[1], linscale=0.5)
|
||||
ax.set_aspect(**anchor_kwargs)
|
||||
if i > 0:
|
||||
if i in [0, 1]:
|
||||
ax.set_ylim(scales[0], scales[-1])
|
||||
pos_equal = ax.get_position().bounds
|
||||
else:
|
||||
pos_auto = list(ax.get_position().bounds)
|
||||
ax.set_aspect('auto', adjustable='box', anchor=(0.5, 0.5))
|
||||
ax.set_position([pos_auto[0], pos_equal[1], pos_auto[2], pos_equal[3]])
|
||||
ax.set_ylim(0.9, 30)
|
||||
if i == 1:
|
||||
hide_ticks(ax, 'left')
|
||||
big_axes[i] = ax
|
||||
ylabel(big_axes[0], ylabels['big'], transform=big_subfig.transSubfigure, **ylab_big_kwargs)
|
||||
|
||||
@@ -11,7 +11,7 @@ target_species = [
|
||||
'Chorthippus_biguttulus',
|
||||
'Chorthippus_mollis',
|
||||
'Chrysochraon_dispar',
|
||||
'Euchorthippus_declivus',
|
||||
# 'Euchorthippus_declivus',
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
|
||||
400
python/fig_invariance_short.py
Normal file
400
python/fig_invariance_short.py
Normal file
@@ -0,0 +1,400 @@
|
||||
import plotstyle_plt
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from itertools import product
|
||||
from thunderhopper.filetools import search_files
|
||||
from thunderhopper.modeltools import load_data
|
||||
from thunderhopper.filtertools import find_kern_specs
|
||||
from misc_functions import get_saturation
|
||||
from color_functions import load_colors
|
||||
from plot_functions import hide_axis, ylimits, xlabel, ylabel, title_subplot,\
|
||||
plot_line, strip_zeros, time_bar,\
|
||||
letter_subplot, letter_subplots
|
||||
from IPython import embed
|
||||
|
||||
def plot_snippets(axes, time, snippets, ymin=None, ymax=None, **kwargs):
|
||||
ymin, ymax = ylimits(snippets, minval=ymin, maxval=ymax, pad=0.05)
|
||||
for i, ax in enumerate(axes):
|
||||
plot_line(ax, time, snippets[:, ..., i], ymin=ymin, ymax=ymax, **kwargs)
|
||||
return None
|
||||
|
||||
def plot_curves(ax, scales, measures, fill_kwargs={}, **kwargs):
|
||||
if measures.ndim == 1:
|
||||
ax.plot(scales, measures, **kwargs)[0]
|
||||
return measures
|
||||
median_measure = np.nanmedian(measures, axis=1)
|
||||
spread_measure = [np.nanpercentile(measures, 25, axis=1),
|
||||
np.nanpercentile(measures, 75, axis=1)]
|
||||
ax.plot(scales, median_measure, **kwargs)[0]
|
||||
ax.fill_between(scales, *spread_measure, **fill_kwargs)
|
||||
return median_measure
|
||||
|
||||
def exclude_zero_scale(data, stages):
|
||||
inds = data['scales'] > 0
|
||||
data['scales'] = data['scales'][inds]
|
||||
for stage in stages:
|
||||
data[f'mean_{stage}'] = data[f'mean_{stage}'][inds, ...]
|
||||
return data
|
||||
|
||||
def reduce_kernel_set(data, inds, keyword, stages=['conv', 'feat']):
|
||||
for stage in stages:
|
||||
key = f'{keyword}_{stage}'
|
||||
data[key] = data[key][:, inds, ...]
|
||||
return data
|
||||
|
||||
|
||||
# GENERAL SETTINGS:
|
||||
target_species = [
|
||||
'Chorthippus_biguttulus',
|
||||
'Chorthippus_mollis',
|
||||
'Chrysochraon_dispar',
|
||||
'Euchorthippus_declivus',
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
][5]
|
||||
example_file = {
|
||||
'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms',
|
||||
'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms',
|
||||
'Chrysochraon_dispar': 'Chrysochraon_dispar_DJN_26_T28C_DT-32s134ms-34s432ms',
|
||||
'Euchorthippus_declivus': 'Euchorthippus_declivus_FTN_79-2s167ms-2s563ms',
|
||||
'Gomphocerippus_rufus': 'Gomphocerippus_rufus_FTN_91-3-884ms-10s427ms',
|
||||
'Omocestus_rufipes': 'Omocestus_rufipes_DJN_32-40s724ms-48s779ms',
|
||||
'Pseudochorthippus_parallelus': 'Pseudochorthippus_parallelus_GBC_88-6s678ms-9s32.3ms'
|
||||
}[target_species]
|
||||
stages = ['filt', 'env', 'conv', 'feat']
|
||||
raw_path = search_files(target_species, incl='unnormed', dir='../data/inv/short/condensed/')[0]
|
||||
base_path = search_files(target_species, incl='base', dir='../data/inv/short/condensed/')[0]
|
||||
range_path = search_files(target_species, incl='range', dir='../data/inv/short/condensed/')[0]
|
||||
snip_path = search_files(example_file, dir='../data/inv/short/')[0]
|
||||
save_path = '../figures/fig_invariance_short.pdf'
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
exclude_zero = True
|
||||
|
||||
# SUBSET SETTINGS:
|
||||
types = np.array([1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10])
|
||||
sigmas = np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032])
|
||||
# types = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10]
|
||||
# sigmas = [0.001, 0.002, 0.004, 0.008, 0.016, 0.032]
|
||||
kernels = np.array([
|
||||
[1, 0.002],
|
||||
[-1, 0.002],
|
||||
[2, 0.004],
|
||||
[-2, 0.004],
|
||||
[3, 0.032],
|
||||
[-3, 0.032]
|
||||
])
|
||||
kernels = None
|
||||
|
||||
# GRAPH SETTINGS:
|
||||
fig_kwargs = dict(
|
||||
figsize=(32/2.54, 32/2.54),
|
||||
)
|
||||
super_grid_kwargs = dict(
|
||||
nrows=2,
|
||||
ncols=1,
|
||||
wspace=0,
|
||||
hspace=0,
|
||||
left=0,
|
||||
right=1,
|
||||
bottom=0,
|
||||
top=1,
|
||||
height_ratios=[3, 2]
|
||||
)
|
||||
subfig_specs = dict(
|
||||
snip=(0, 0),
|
||||
big=(1, 0),
|
||||
)
|
||||
snip_grid_kwargs = dict(
|
||||
nrows=len(stages),
|
||||
ncols=None,
|
||||
wspace=0.1,
|
||||
hspace=0.4,
|
||||
left=0.08,
|
||||
right=0.95,
|
||||
bottom=0.08,
|
||||
top=0.95
|
||||
)
|
||||
big_grid_kwargs = dict(
|
||||
nrows=1,
|
||||
ncols=3,
|
||||
wspace=0.2,
|
||||
hspace=0,
|
||||
left=snip_grid_kwargs['left'],
|
||||
right=0.96,
|
||||
bottom=0.2,
|
||||
top=0.95
|
||||
)
|
||||
|
||||
# PLOT SETTINGS:
|
||||
fs = dict(
|
||||
lab_norm=16,
|
||||
lab_tex=20,
|
||||
letter=22,
|
||||
tit_norm=16,
|
||||
tit_tex=20,
|
||||
bar=16,
|
||||
)
|
||||
colors = load_colors('../data/stage_colors.npz')
|
||||
lw = dict(
|
||||
filt=0.25,
|
||||
env=0.25,
|
||||
conv=0.25,
|
||||
feat=1,
|
||||
big=3,
|
||||
plateau=1.5,
|
||||
)
|
||||
xlabels = dict(
|
||||
big='scale $\\alpha$',
|
||||
)
|
||||
ylabels = dict(
|
||||
filt='$x_{\\text{filt}}$',
|
||||
env='$x_{\\text{env}}$',
|
||||
conv='$c_i$',
|
||||
feat='$f_i$',
|
||||
big=['intensity', 'rel. intensity', 'norm. intensity']
|
||||
)
|
||||
xlab_big_kwargs = dict(
|
||||
y=0,
|
||||
fontsize=fs['lab_norm'],
|
||||
ha='center',
|
||||
va='bottom',
|
||||
)
|
||||
ylab_snip_kwargs = dict(
|
||||
x=0,
|
||||
fontsize=fs['lab_tex'],
|
||||
rotation=0,
|
||||
ha='left',
|
||||
va='center'
|
||||
)
|
||||
ylab_big_kwargs = dict(
|
||||
x=-0.12,
|
||||
fontsize=fs['lab_norm'],
|
||||
ha='center',
|
||||
va='bottom',
|
||||
)
|
||||
yloc = dict(
|
||||
filt=3000,
|
||||
env=1000,
|
||||
conv=30,
|
||||
feat=1,
|
||||
)
|
||||
title_kwargs = dict(
|
||||
x=0.5,
|
||||
yref=1,
|
||||
ha='center',
|
||||
va='top',
|
||||
fontsize=fs['tit_norm'],
|
||||
)
|
||||
letter_snip_kwargs = dict(
|
||||
x=0,
|
||||
yref=0.5,
|
||||
ha='left',
|
||||
va='center',
|
||||
fontsize=fs['letter'],
|
||||
)
|
||||
letter_big_kwargs = dict(
|
||||
x=0,
|
||||
y=1,
|
||||
ha='left',
|
||||
va='bottom',
|
||||
fontsize=fs['letter'],
|
||||
)
|
||||
bar_time = 5
|
||||
bar_kwargs = dict(
|
||||
dur=bar_time,
|
||||
y0=-0.25,
|
||||
y1=-0.1,
|
||||
xshift=1,
|
||||
color='k',
|
||||
lw=0,
|
||||
clip_on=False,
|
||||
text_pos=(-0.1, 0.5),
|
||||
text_str=f'${bar_time}\\,\\text{{s}}$',
|
||||
text_kwargs=dict(
|
||||
fontsize=fs['bar'],
|
||||
ha='right',
|
||||
va='center',
|
||||
)
|
||||
)
|
||||
plateau_settings = dict(
|
||||
low=0.05,
|
||||
high=0.95,
|
||||
first=True,
|
||||
last=True,
|
||||
condense=None,
|
||||
)
|
||||
plateau_line_kwargs = dict(
|
||||
lw=lw['plateau'],
|
||||
ls='--',
|
||||
zorder=1,
|
||||
)
|
||||
plateau_dot_kwargs = dict(
|
||||
marker='o',
|
||||
markersize=8,
|
||||
markeredgewidth=1,
|
||||
clip_on=False,
|
||||
)
|
||||
|
||||
# EXECUTION:
|
||||
|
||||
# Load raw (unnormed) invariance data:
|
||||
data, config = load_data(raw_path, files='scales', keywords='mean')
|
||||
if exclude_zero:
|
||||
data = exclude_zero_scale(data, stages)
|
||||
scales = data['scales']
|
||||
|
||||
# Load snippet data:
|
||||
snip, _ = load_data(snip_path, files='example_scales', keywords='snip')
|
||||
t_full = np.arange(snip['snip_filt'].shape[0]) / config['rate']
|
||||
snip_scales = snip['example_scales']
|
||||
|
||||
# Optional kernel subset:
|
||||
reduce_kernels = False
|
||||
if any(var is not None for var in [kernels, types, sigmas]):
|
||||
kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas)
|
||||
data = reduce_kernel_set(data, kern_inds, keyword='mean')
|
||||
snip = reduce_kernel_set(snip, kern_inds, keyword='snip')
|
||||
reduce_kernels = True
|
||||
|
||||
# Adjust grid parameters:
|
||||
snip_grid_kwargs['ncols'] = snip_scales.size
|
||||
|
||||
# Prepare overall graph:
|
||||
fig = plt.figure(**fig_kwargs)
|
||||
super_grid = fig.add_gridspec(**super_grid_kwargs)
|
||||
|
||||
# Prepare stage-specific snippet axes:
|
||||
snip_subfig = fig.add_subfigure(super_grid[subfig_specs['snip']])
|
||||
snip_grid = snip_subfig.add_gridspec(**snip_grid_kwargs)
|
||||
snip_axes = np.zeros((snip_grid.nrows, snip_grid.ncols), dtype=object)
|
||||
for i, j in product(range(snip_grid.nrows), range(snip_grid.ncols)):
|
||||
ax = snip_subfig.add_subplot(snip_grid[i, j])
|
||||
ax.set_xlim(t_full[0], t_full[-1])
|
||||
ax.yaxis.set_major_locator(plt.MultipleLocator(yloc[stages[i]]))
|
||||
hide_axis(ax, 'bottom')
|
||||
if i == 0:
|
||||
title = title_subplot(ax, f'$\\alpha={strip_zeros(snip_scales[j])}$',
|
||||
ref=snip_subfig, **title_kwargs)
|
||||
if j == 0:
|
||||
ylabel(ax, ylabels[stages[i]], **ylab_snip_kwargs, transform=snip_subfig.transSubfigure)
|
||||
else:
|
||||
hide_axis(ax, 'left')
|
||||
snip_axes[i, j] = ax
|
||||
time_bar(snip_axes[-1, -1], **bar_kwargs)
|
||||
letter_subplot(snip_subfig, 'a', ref=title, **letter_snip_kwargs)
|
||||
|
||||
# Prepare analysis axes:
|
||||
big_subfig = fig.add_subfigure(super_grid[subfig_specs['big']])
|
||||
big_grid = big_subfig.add_gridspec(**big_grid_kwargs)
|
||||
big_axes = np.zeros((big_grid.ncols,), dtype=object)
|
||||
for i in range(big_grid.ncols):
|
||||
ax = big_subfig.add_subplot(big_grid[0, i])
|
||||
ax.set_xlim(scales[0], scales[-1])
|
||||
ax.set_xscale('symlog', linthresh=scales[1], linscale=0.5)
|
||||
ax.set_yscale('symlog', linthresh=0.01, linscale=0.1)
|
||||
xlabel(ax, xlabels['big'], transform=big_subfig, **xlab_big_kwargs)
|
||||
ylabel(ax, ylabels['big'][i], **ylab_big_kwargs)
|
||||
if i < (big_grid.ncols - 1):
|
||||
ax.set_ylim(scales[0], scales[-1])
|
||||
else:
|
||||
ax.set_ylim(0, 1)
|
||||
big_axes[i] = ax
|
||||
letter_subplots(big_axes, 'bcd', **letter_big_kwargs)
|
||||
|
||||
if True:
|
||||
# Plot filtered snippets:
|
||||
plot_snippets(snip_axes[0, :], t_full, snip['snip_filt'],
|
||||
c=colors['filt'], lw=lw['filt'])
|
||||
|
||||
# Plot envelope snippets:
|
||||
plot_snippets(snip_axes[1, :], t_full, snip['snip_env'],
|
||||
ymin=0, c=colors['env'], lw=lw['env'])
|
||||
|
||||
# Plot kernel response snippets:
|
||||
plot_snippets(snip_axes[2, :], t_full, snip['snip_conv'],
|
||||
c=colors['conv'], lw=lw['conv'])
|
||||
|
||||
# Plot feature snippets:
|
||||
plot_snippets(snip_axes[3, :], t_full, snip['snip_feat'],
|
||||
ymin=0, ymax=1, c=colors['feat'], lw=lw['feat'])
|
||||
del snip
|
||||
|
||||
# Remember saturation points:
|
||||
crit_inds, crit_scales = {}, {}
|
||||
|
||||
# Unnormed measures:
|
||||
for stage in stages:
|
||||
# Plot average intensity measure across recordings:
|
||||
curve = plot_curves(big_axes[0], scales, data[f'mean_{stage}'].mean(axis=-1),
|
||||
c=colors[stage], lw=lw['big'],
|
||||
fill_kwargs=dict(color=colors[stage], alpha=0.25))
|
||||
# Indicate saturation point:
|
||||
if stage == 'feat':
|
||||
ind = get_saturation(curve, **plateau_settings)[1]
|
||||
scale = scales[ind]
|
||||
big_axes[0].plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=big_axes[0].get_xaxis_transform())
|
||||
big_axes[0].plot(scale, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=big_axes[0].get_xaxis_transform())
|
||||
big_axes[0].vlines(scale, big_axes[0].get_ylim()[0], curve[ind],
|
||||
color=colors[stage], **plateau_line_kwargs)
|
||||
# Log saturation point:
|
||||
crit_inds[stage] = ind
|
||||
crit_scales[stage] = scale
|
||||
del data
|
||||
|
||||
# Noise baseline-related measures:
|
||||
data, _ = load_data(base_path, files='scales', keywords='mean')
|
||||
if exclude_zero:
|
||||
data = exclude_zero_scale(data, stages)
|
||||
if reduce_kernels:
|
||||
data = reduce_kernel_set(data, kern_inds, keyword='mean')
|
||||
for stage in stages:
|
||||
# Plot average intensity measure across recordings:
|
||||
curve = plot_curves(big_axes[1], scales, data[f'mean_{stage}'].mean(axis=-1),
|
||||
c=colors[stage], lw=lw['big'],
|
||||
fill_kwargs=dict(color=colors[stage], alpha=0.25))
|
||||
# Indicate saturation point:
|
||||
if stage == 'feat':
|
||||
ind, scale = crit_inds[stage], crit_scales[stage]
|
||||
big_axes[1].plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=big_axes[1].get_xaxis_transform())
|
||||
big_axes[1].plot(scale, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=big_axes[1].get_xaxis_transform())
|
||||
big_axes[1].vlines(scale, big_axes[1].get_ylim()[0], curve[ind],
|
||||
color=colors[stage], **plateau_line_kwargs)
|
||||
del data
|
||||
|
||||
# Min-max normalized measures:
|
||||
data, _ = load_data(range_path, files='scales', keywords='mean')
|
||||
if exclude_zero:
|
||||
data = exclude_zero_scale(data, stages)
|
||||
if reduce_kernels:
|
||||
data = reduce_kernel_set(data, kern_inds, keyword='mean')
|
||||
for stage in stages:
|
||||
# Plot average intensity measure across recordings:
|
||||
curve = plot_curves(big_axes[2], scales, data[f'mean_{stage}'].mean(axis=-1),
|
||||
c=colors[stage], lw=lw['big'],
|
||||
fill_kwargs=dict(color=colors[stage], alpha=0.25))
|
||||
|
||||
# Indicate saturation point:
|
||||
if stage == 'feat':
|
||||
ind, scale = crit_inds[stage], crit_scales[stage]
|
||||
big_axes[2].plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=big_axes[2].get_xaxis_transform())
|
||||
big_axes[2].plot(scale, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=big_axes[2].get_xaxis_transform())
|
||||
big_axes[2].vlines(scale, big_axes[2].get_ylim()[0], curve[ind],
|
||||
color=colors[stage], **plateau_line_kwargs)
|
||||
del data
|
||||
|
||||
# Save graph:
|
||||
if save_path is not None:
|
||||
file_name = save_path.replace('.pdf', f'_{target_species}.pdf')
|
||||
fig.savefig(file_name)
|
||||
plt.show()
|
||||
|
||||
print('Done.')
|
||||
embed()
|
||||
@@ -13,7 +13,7 @@ target_species = [
|
||||
'Chorthippus_biguttulus',
|
||||
'Chorthippus_mollis',
|
||||
'Chrysochraon_dispar',
|
||||
'Euchorthippus_declivus',
|
||||
# 'Euchorthippus_declivus',
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
|
||||
@@ -39,17 +39,20 @@ def plot_bi_snippets(axes, time, binary, **kwargs):
|
||||
plot_barcode(ax, time, binary[:, None], **kwargs)
|
||||
return None
|
||||
|
||||
def side_distributions(axes, snippets, inset_bounds, thresh, nbins=1000,
|
||||
fill_kwargs={}, **kwargs):
|
||||
limits = np.array([snippets.min(), snippets.max()]) * 1.1
|
||||
def side_distributions(axes, snippets, inset_bounds, thresh, nbins=50,
|
||||
limits=None, fill_kwargs={}, **kwargs):
|
||||
if limits is None:
|
||||
limits = np.array([snippets.min(), snippets.max()]) * 1.1
|
||||
edges = np.linspace(*limits, nbins + 1)
|
||||
centers = edges[:-1] + (edges[1] - edges[0]) / 2
|
||||
insets = []
|
||||
for ax, snippet in zip(axes, snippets.T):
|
||||
pdf, _ = np.histogram(snippet, edges, density=True)
|
||||
inset = ax.inset_axes(inset_bounds)
|
||||
inset.plot(pdf, centers, **kwargs)
|
||||
inset.fill_betweenx(centers, pdf.min(), pdf, where=(centers > thresh), **fill_kwargs)
|
||||
handle = inset.plot(pdf, centers, **kwargs)[0]
|
||||
set_clip_box(handle, inset, bounds=[[-0.05, 0], [1.05, 1]])
|
||||
handle = inset.fill_betweenx(centers, pdf.min(), pdf, where=(centers > thresh), **fill_kwargs)
|
||||
set_clip_box(handle, inset, bounds=[[-0.05, 0], [1.05, 1]])
|
||||
inset.set_xlim(0, pdf.max())
|
||||
inset.set_ylim(ax.get_ylim())
|
||||
inset.axis('off')
|
||||
@@ -99,7 +102,7 @@ snip_grid_kwargs = dict(
|
||||
right=0.93,
|
||||
bottom=0.15,
|
||||
top=0.95,
|
||||
height_ratios=[2, 1, 1]
|
||||
height_ratios=[4, 1, 2]
|
||||
)
|
||||
input_grid_kwargs = dict(
|
||||
nrows=1,
|
||||
@@ -115,10 +118,10 @@ big_grid_kwargs = dict(
|
||||
nrows=2,
|
||||
ncols=1,
|
||||
wspace=0,
|
||||
hspace=0.3,
|
||||
hspace=0.15,
|
||||
left=0.17,
|
||||
right=0.96,
|
||||
bottom=0.1,
|
||||
bottom=0.05,
|
||||
top=0.99
|
||||
)
|
||||
dist_inset_bounds = [1.02, 0, 0.2, 1]
|
||||
@@ -140,6 +143,7 @@ lw = dict(
|
||||
bi=0.1,
|
||||
feat=3,
|
||||
big=4,
|
||||
thresh=1.5,
|
||||
kern=2.5,
|
||||
plateau=1.5,
|
||||
)
|
||||
@@ -155,16 +159,16 @@ ylabels = dict(
|
||||
big='$\\mu_f$',
|
||||
)
|
||||
xlab_alpha_kwargs = dict(
|
||||
y=-0.15,
|
||||
y=0.5,
|
||||
fontsize=fs['lab_norm'],
|
||||
ha='center',
|
||||
va='top',
|
||||
va='bottom',
|
||||
)
|
||||
xlab_sigma_kwargs = dict(
|
||||
y=-0.12,
|
||||
y=0,
|
||||
fontsize=fs['lab_tex'],
|
||||
ha=xlab_alpha_kwargs['ha'],
|
||||
va=xlab_alpha_kwargs['va'],
|
||||
va='bottom',
|
||||
)
|
||||
ylab_snip_kwargs = dict(
|
||||
x=0.08,
|
||||
@@ -212,8 +216,8 @@ letter_snip_kwargs = dict(
|
||||
fontsize=fs['letter'],
|
||||
)
|
||||
letter_big_kwargs = dict(
|
||||
x=0,
|
||||
yref=letter_snip_kwargs['y'],
|
||||
xref=0,
|
||||
y=1,
|
||||
ha='left',
|
||||
va='top',
|
||||
fontsize=fs['letter'],
|
||||
@@ -230,6 +234,12 @@ dist_fill_kwargs = dict(
|
||||
color=colors['bi'],
|
||||
lw=0.1,
|
||||
)
|
||||
thresh_kwargs = dict(
|
||||
color='k',
|
||||
lw=lw['thresh'],
|
||||
ls='--',
|
||||
zorder=3,
|
||||
)
|
||||
bar_time = 0.1
|
||||
bar_kwargs = dict(
|
||||
dur=bar_time,
|
||||
@@ -353,9 +363,11 @@ for i in range(thresh_rel.size):
|
||||
subfig_spec[0] = slice(*(subfig_spec[0] + i * snip_rows))
|
||||
snip_subfig = fig.add_subfigure(super_grid[*subfig_spec])
|
||||
axes = add_snip_axes(snip_subfig, snip_grid_kwargs)
|
||||
low_box = axes[-1, 0].get_position()
|
||||
high_box = axes[0, 0].get_position()
|
||||
[hide_axis(ax, 'left') for ax in axes[1:, 1]]
|
||||
super_ylabel(f'$\\Theta={strip_zeros(thresh_rel[i])}\\cdot\\sigma_{{\\eta}}$',
|
||||
snip_subfig, axes[-1, 0], axes[0, 0], **ylab_super_kwargs)
|
||||
snip_subfig, axes[-1, 0], axes[0, 0], **ylab_super_kwargs)
|
||||
for (ax1, ax2), stage in zip(axes[:, :2], stages):
|
||||
ax1.yaxis.set_major_locator(plt.MultipleLocator(yloc[stage][0]))
|
||||
ax2.yaxis.set_major_locator(plt.MultipleLocator(yloc[stage][1]))
|
||||
@@ -376,17 +388,18 @@ alpha_ax.set_xlim(scales[0], scales[-1])
|
||||
alpha_ax.set_xscale('symlog', linthresh=scales[scales > 0][0], linscale=0.5)
|
||||
ylimits(pure_data['measure_feat'], alpha_ax, minval=0, pad=ypad['big'])
|
||||
alpha_ax.yaxis.set_major_locator(plt.MultipleLocator(yloc['big']))
|
||||
xlabel(alpha_ax, xlabels['alpha'], **xlab_alpha_kwargs)
|
||||
xlabel(alpha_ax, xlabels['alpha'], **xlab_alpha_kwargs, transform=big_subfig)
|
||||
ylabel(alpha_ax, ylabels['big'], transform=big_subfig.transSubfigure, **ylab_big_kwargs)
|
||||
letter_subplot(alpha_ax, 'e', ref=big_subfig, **letter_big_kwargs)
|
||||
|
||||
sigma_ax = big_subfig.add_subplot(big_grid[1, 0])
|
||||
sigma_ax.set_xlim(noise_data['measure_inv'].min(), noise_data['measure_inv'].max())
|
||||
sigma_ax.set_xlim(scales[0], scales[-1])
|
||||
sigma_ax.set_xlim(1, noise_data['measure_inv'].max())
|
||||
sigma_ax.set_xscale('symlog', linthresh=scales[scales > 0][0], linscale=0.5)
|
||||
ylimits(pure_data['measure_feat'], sigma_ax, minval=0, pad=ypad['big'])
|
||||
sigma_ax.yaxis.set_major_locator(plt.MultipleLocator(yloc['big']))
|
||||
xlabel(sigma_ax, xlabels['sigma'], **xlab_sigma_kwargs)
|
||||
xlabel(sigma_ax, xlabels['sigma'], **xlab_sigma_kwargs, transform=big_subfig)
|
||||
ylabel(sigma_ax, ylabels['big'], transform=big_subfig.transSubfigure, **ylab_big_kwargs)
|
||||
letter_subplot(sigma_ax, 'f', ref=big_subfig, **letter_big_kwargs)
|
||||
|
||||
# Plot intensity-adapted snippets:
|
||||
plot_snippets(input_axes, t_full, noise_data['snip_inv'],
|
||||
@@ -403,13 +416,18 @@ for i, (subfig, axes) in enumerate(zip(snip_subfigs, snip_axes)):
|
||||
# Plot kernel response snippets:
|
||||
plot_snippets(axes[0, :], t_full, noise_data['snip_conv'], thresh=thresh_abs[i],
|
||||
ypad=ypad['conv'], fill_kwargs=dist_fill_kwargs, c=shaded['conv'][i], lw=lw['conv'])
|
||||
ylimits(noise_data['snip_conv'][:, 0], axes[0, 0], pad=ypad['conv'])
|
||||
ylim_zoom = ylimits(noise_data['snip_conv'][:, 0], axes[0, 0],
|
||||
pad=ypad['conv'], maxval=thresh_abs[-1])
|
||||
|
||||
# Indicate absolute threshold value:
|
||||
handle = axes[0, 0].axhline(thresh_abs[i], **thresh_kwargs)
|
||||
set_clip_box(handle, axes[0, 0], bounds=[[0, 0], [1, 1.05]])
|
||||
|
||||
# Plot kernel response distributions:
|
||||
side_distributions(axes[0, :1], noise_data['snip_conv'][:, :1], dist_inset_bounds,
|
||||
thresh_abs[i], nbins=50, fill_kwargs=dist_fill_kwargs, **dist_kwargs)
|
||||
thresh_abs[i], nbins=50, limits=ylim_zoom, fill_kwargs=dist_fill_kwargs, **dist_kwargs)
|
||||
side_distributions(axes[0, 1:], noise_data['snip_conv'][:, 1:], dist_inset_bounds,
|
||||
thresh_abs[i], nbins=50, fill_kwargs=dist_fill_kwargs, **dist_kwargs)
|
||||
thresh_abs[i], nbins=50, fill_kwargs=dist_fill_kwargs, **dist_kwargs)
|
||||
|
||||
# Plot binary snippets:
|
||||
plot_bi_snippets(axes[1, :], t_full, noise_data['snip_bi'][:, :, i],
|
||||
@@ -444,7 +462,7 @@ for ax, x in zip([alpha_ax, sigma_ax], [scales, noise_data['measure_inv']]):
|
||||
ax.plot(x[ind], 0, mfc=color, mec='k', alpha=0.75, zorder=6,
|
||||
**plateau_dot_kwargs, transform=ax.get_xaxis_transform())
|
||||
ax.vlines(x[ind], ax.get_ylim()[0], noise_data['measure_feat'][ind, i],
|
||||
color=color, **plateau_line_kwargs)
|
||||
color=color, **plateau_line_kwargs)
|
||||
|
||||
# Add proxy legend:
|
||||
if ax == alpha_ax:
|
||||
|
||||
@@ -165,7 +165,7 @@ target_species = [
|
||||
'Chorthippus_biguttulus',
|
||||
'Chorthippus_mollis',
|
||||
'Chrysochraon_dispar',
|
||||
'Euchorthippus_declivus',
|
||||
# 'Euchorthippus_declivus',
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
@@ -185,7 +185,7 @@ load_kwargs = dict(
|
||||
)
|
||||
save_path = '../figures/fig_invariance_thresh_lp_species.pdf'
|
||||
exclude_zero = True
|
||||
show_floor = False
|
||||
show_floor = True
|
||||
|
||||
# SUBSET SETTINGS:
|
||||
thresh_rel = np.array([0.5, 1, 3])[0]
|
||||
@@ -267,14 +267,15 @@ fs = dict(
|
||||
bar=16,
|
||||
)
|
||||
species_colors = load_colors('../data/species_colors.npz')
|
||||
kernel_shades = [0, 0.75]
|
||||
kernel_shades = [0, 0.5]
|
||||
scale_shades = [1, 0]
|
||||
noise_colors = [(0.5, 0.5, 0.5), (0.7, 0.7, 0.7)]
|
||||
lw = dict(
|
||||
song=0.5,
|
||||
feat=3,
|
||||
kern=2.5,
|
||||
plateau=3,
|
||||
bar=3,
|
||||
plateau=1.5,
|
||||
)
|
||||
space_kwargs = dict(
|
||||
s=30,
|
||||
@@ -411,6 +412,17 @@ plateau_settings = dict(
|
||||
last=True,
|
||||
condense='norm',
|
||||
)
|
||||
plateau_line_kwargs = dict(
|
||||
lw=lw['plateau'],
|
||||
ls='--',
|
||||
zorder=1,
|
||||
)
|
||||
plateau_dot_kwargs = dict(
|
||||
marker='o',
|
||||
markersize=8,
|
||||
markeredgewidth=1,
|
||||
clip_on=False,
|
||||
)
|
||||
|
||||
# EXECUTION:
|
||||
|
||||
@@ -566,6 +578,28 @@ for i, species in enumerate(target_species):
|
||||
handles = noise_ax.plot(scales, noise_measure, lw=lw['feat'])
|
||||
[h.set_color(c) for h, c in zip(handles, kern_colors)]
|
||||
|
||||
# Indicate saturation points:
|
||||
for j in range(pure_measure.shape[1]):
|
||||
color = kern_colors[j]
|
||||
# Indicate feature-specific saturation points of pure curves:
|
||||
ind = get_saturation(pure_measure[:, j], **plateau_settings)[1]
|
||||
scale = scales[ind]
|
||||
pure_ax.plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=pure_ax.get_xaxis_transform())
|
||||
pure_ax.plot(scale, 0, mfc=color, mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=pure_ax.get_xaxis_transform())
|
||||
pure_ax.vlines(scale, pure_ax.get_ylim()[0], pure_measure[ind, j],
|
||||
color=color, **plateau_line_kwargs)
|
||||
# Indicate feature-specific saturation points of noise curves:
|
||||
ind = get_saturation(noise_measure[:, j], **plateau_settings)[1]
|
||||
scale = scales[ind]
|
||||
noise_ax.plot(scale, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs,
|
||||
transform=noise_ax.get_xaxis_transform())
|
||||
noise_ax.plot(scale, 0, mfc=color, mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs,
|
||||
transform=noise_ax.get_xaxis_transform())
|
||||
noise_ax.vlines(scale, noise_ax.get_ylim()[0], noise_measure[ind, j],
|
||||
color=color, **plateau_line_kwargs)
|
||||
|
||||
if i == 0:
|
||||
# Indicate kernel waveforms:
|
||||
ylims = ylimits(config['kernels'], pad=0.05)
|
||||
@@ -604,15 +638,15 @@ for i, species in enumerate(target_species):
|
||||
noise_bars[0].tick_params(axis='y', which='both', left=True, labelleft=True)
|
||||
ylabel(noise_bars[0], ylabels['bar'], **ylab_cbar_kwargs)
|
||||
|
||||
# Indicate plateaus of pure invariance curves:
|
||||
# Indicate across-feature saturation points of pure curves:
|
||||
low_ind, high_ind = get_saturation(pure_measure, **plateau_settings)
|
||||
pure_bars[i].axhline(scales[low_ind], c=noise_colors[0], lw=lw['plateau'])
|
||||
pure_bars[i].axhline(scales[high_ind], c=noise_colors[1], lw=lw['plateau'])
|
||||
pure_bars[i].axhline(scales[low_ind], c=noise_colors[0], lw=lw['bar'])
|
||||
pure_bars[i].axhline(scales[high_ind], c=noise_colors[1], lw=lw['bar'])
|
||||
|
||||
# Indicate plateaus of noise invariance curves:
|
||||
# Indicate across-feature saturation points of noise curves:
|
||||
low_ind, high_ind = get_saturation(noise_measure, **plateau_settings)
|
||||
noise_bars[i].axhline(scales[low_ind], c=noise_colors[0], lw=lw['plateau'])
|
||||
noise_bars[i].axhline(scales[high_ind], c=noise_colors[1], lw=lw['plateau'])
|
||||
noise_bars[i].axhline(scales[low_ind], c=noise_colors[0], lw=lw['bar'])
|
||||
noise_bars[i].axhline(scales[high_ind], c=noise_colors[1], lw=lw['bar'])
|
||||
|
||||
# Log start and end of invariance curve:
|
||||
min_noise_feat[i, :] = noise_measure.min(axis=0)
|
||||
|
||||
@@ -136,9 +136,9 @@ zoom_kwargs = dict(
|
||||
t = [1, -1, 2, -2, 3, -3, 4, -4]
|
||||
s = [0.004, 0.032]
|
||||
kernels = np.array([[i, j] for i in t for j in s])
|
||||
conv_colors = load_colors('../data/conv_colors.npz')
|
||||
bi_colors = load_colors('../data/bi_colors.npz')
|
||||
feat_colors = load_colors('../data/feat_colors.npz')
|
||||
conv_colors = load_colors('../data/conv_colors_subset.npz')
|
||||
bi_colors = load_colors('../data/bi_colors_subset.npz')
|
||||
feat_colors = load_colors('../data/feat_colors_subset.npz')
|
||||
|
||||
# EXECUTION:
|
||||
for data_path in data_paths:
|
||||
|
||||
130
python/fig_saturation_log-hp_appendix.py
Normal file
130
python/fig_saturation_log-hp_appendix.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""Appendix figure: distributions of log-hp saturation points per species.

Plots one histogram panel per target species, showing the distribution
of per-song saturation scales ('crit_scales'), with the distribution
mean marked and the song count annotated. Saves the figure as PDF.
"""
import plotstyle_plt
import numpy as np
import matplotlib.pyplot as plt
from thunderhopper.filetools import search_files
from plot_functions import xlabel, super_ylabel
from color_functions import load_colors
from misc_functions import shorten_species
from IPython import embed

# GENERAL SETTINGS:
target_species = [
    'Chorthippus_biguttulus',
    'Chorthippus_mollis',
    'Chrysochraon_dispar',
    # 'Euchorthippus_declivus',
    'Gomphocerippus_rufus',
    'Omocestus_rufipes',
    'Pseudochorthippus_parallelus',
]
data_path = '../data/inv/log_hp/saturation/'
save_path = '../figures/fig_saturation_log-hp_appendix.pdf'

# GRAPH SETTINGS:
fig_kwargs = dict(
    figsize=(32/2.54, 16/2.54),  # cm converted to inches
    nrows=len(target_species),
    ncols=1,
    sharex=True,
    sharey=False,
    gridspec_kw=dict(
        wspace=0,
        hspace=0.3,
        left=0.09,
        right=0.99,
        bottom=0.1,
        top=0.95,
    )
)

# PLOT SETTINGS:
colors = load_colors('../data/species_colors.npz')
bar_kwargs = dict(
    ec='w',
)
mean_kwargs = dict(
    c='k',
    lw=3,
    ls='--'
)
xlab = 'scale $\\alpha$'
ylab = '$\\text{PDF}_{\\alpha}$'
xlab_kwargs = dict(
    y=0,
    fontsize=16,
    ha='center',
    va='bottom',
)
ylab_kwargs = dict(
    x=0.005,
    fontsize=16,
    ha='left',
    va='center',
)
# Legend box spans the top margin strip above the axes grid:
leg_x = fig_kwargs['gridspec_kw']['left']
leg_y = fig_kwargs['gridspec_kw']['top']
leg_box = [
    leg_x,
    leg_y,
    fig_kwargs['gridspec_kw']['right'] - leg_x,
    1 - leg_y
]
leg_kwargs = dict(
    ncols=len(target_species),
    loc='upper center',
    bbox_to_anchor=leg_box,
    frameon=False,
    prop=dict(
        size=15,
        style='italic',
    ),
    borderpad=0,
    borderaxespad=0,
    handlelength=1,
    columnspacing=1,
)
text_kwargs = dict(
    x=1,
    y=1,
    fontsize=14,
    ha='right',
    va='top',
)

# Prepare graph:
fig, axes = plt.subplots(**fig_kwargs)
xlabel(axes[-1], xlab, **xlab_kwargs, transform=fig.transFigure)
super_ylabel(ylab, fig, axes[0], axes[-1], **ylab_kwargs)

# Run through species:
handles = []
for species, ax in zip(target_species, axes):
    color = colors[species]

    # Load species data:
    # NOTE(review): assumes exactly one saturation file per species and
    # that it contains 'hist', 'bins', and 'crit_scales' arrays — verify
    # against save_saturation_log-hp.py.
    path = search_files(species, dir=data_path)[0]
    data = dict(np.load(path))
    hist = data['hist']
    bins = data['bins']
    n_songs = data['crit_scales'].size

    # Plot distribution of saturation points:
    handles.append(ax.bar(bins, hist, width=bins[1] - bins[0], fc=color, **bar_kwargs))
    ax.set_ylim(0, hist.max() * 1.05)

    # Indicate mean of distribution:
    ax.axvline(data['crit_scales'].mean(), **mean_kwargs)

    # Indicate number of songs:
    ax.text(**text_kwargs, s=f'n = {n_songs}', transform=ax.transAxes)

# Posthocs:
labels = [shorten_species(species) for species in target_species]
fig.legend(handles, labels, **leg_kwargs)
# Shared x-axis: setting the limits on the last axis propagates to all.
ax.set_xlim(0, bins[-1])

# Save graph:
fig.savefig(save_path)
plt.show()
|
||||
|
||||
|
||||
@@ -48,6 +48,28 @@ def sort_files_by_rec(paths, sources=['BM04', 'BM93', 'DJN', 'GBC', 'FTN']):
|
||||
sorted_paths = [path for paths in sorted_paths.values() for path in paths]
|
||||
return sorted_paths
|
||||
|
||||
def get_histogram(data, edges=None, nbins=50, pad=0.1, shared=True):
    """Compute density-normalized histogram(s) of 1D or column-wise 2D data.

    Parameters
    ----------
    data : ndarray
        Samples, either 1D of shape (n,) or 2D of shape (n, m); for 2D
        input one histogram is computed per column.
    edges : ndarray, optional
        Pre-computed bin edges. If given, the bin count is inferred from
        the edges and `nbins`/`pad` are ignored. A 1D array is shared
        across all columns; a (k + 1, m) array holds per-column edges and
        must be combined with shared=False.
    nbins : int, optional
        Number of bins used when `edges` is None.
    pad : float, optional
        Fractional padding of the data range added on both sides when
        computing edges automatically.
    shared : bool, optional
        If True, all columns of 2D data share one set of edges spanning
        the global data range; if False, each column gets its own edges
        spanning its column range.

    Returns
    -------
    hists : ndarray
        Density histogram(s) of shape (k,) or (k, m), where k is the
        number of bins.
    centers : ndarray
        Bin center positions, matching the leading shape of the edges.
    """
    if edges is None:
        # Global extrema (shared) or per-column extrema (not shared):
        axis = None if shared else 0
        min_data, max_data = data.min(axis=axis), data.max(axis=axis)
        pad = pad * (max_data - min_data)
        if shared or data.ndim == 1:
            edges = np.linspace(min_data - pad, max_data + pad, nbins + 1)
        else:
            edges = np.zeros((nbins + 1, data.shape[1]))
            # Fixed: zip() yields one tuple per column, which must be
            # unpacked explicitly inside enumerate() — the original
            # `for i, mini, maxi, padi in enumerate(zip(...))` raised a
            # ValueError, so this branch could never run.
            for i, (mini, maxi, padi) in enumerate(zip(min_data, max_data, pad)):
                edges[:, i] = np.linspace(mini - padi, maxi + padi, nbins + 1)
    # Infer the bin count from the (possibly caller-provided) edges, so
    # custom edges with a length other than nbins + 1 work correctly:
    nbins = np.asarray(edges).shape[0] - 1

    centers = edges[:-1] + np.diff(edges, axis=0) / 2
    if data.ndim == 1:
        hists, _ = np.histogram(data, bins=edges, density=True)
    else:
        hists = np.zeros((nbins, data.shape[1]))
        for i in range(data.shape[1]):
            # Shared edges are 1D; per-column edges are stacked along axis 1:
            bins = edges if shared else edges[:, i]
            hists[:, i], _ = np.histogram(data[:, i], bins=bins, density=True)
    return hists, centers
|
||||
|
||||
def get_kde(data, sigma, axis=None, n=1000, pad=10):
|
||||
if axis is None:
|
||||
axis = np.linspace(data.min() - pad * sigma, data.max() + pad * sigma, n)
|
||||
|
||||
@@ -150,8 +150,8 @@ def super_xlabel(label, fig, left_ax, right_ax, y=0.005,
|
||||
|
||||
def super_ylabel(label, fig, low_ax, high_ax, x=0.005,
|
||||
high_fig=None, low_fig=None, **kwargs):
|
||||
low_y = high_ax.get_position().y0
|
||||
high_y = low_ax.get_position().y1
|
||||
low_y = low_ax.get_position().y0
|
||||
high_y = high_ax.get_position().y1
|
||||
if low_fig is not None or high_fig is not None:
|
||||
trans_fig = get_trans_artist(fig)
|
||||
if low_fig is not None:
|
||||
|
||||
62
python/save_field_data.py
Normal file
62
python/save_field_data.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""Batch-process raw field WAV recordings into model representations.

Runs the thunderhopper processing pipeline over every WAV file in the
input folder for the selected mode ('song' or 'noise') and stores the
requested stages as NPZ archives in the output folder. With `gui`
enabled, song labels can be edited interactively during processing.
"""
import numpy as np
from thunderhopper.filetools import search_files, crop_paths
from thunderhopper.model import configuration, process_signal
from thunderhopper.modeltools import load_data
from IPython import embed

## SETTINGS:

# General:
search_target = '*'
mode = ['song', 'noise'][1]
input_folder = f'../data/field/raw/{mode}/'
output_folder = f'../data/field/processed/{mode}/'
stages = ['raw', 'norm']
if False:
    # Overwrites edited:
    # NOTE(review): enabling this re-saves the 'songs' labels and would
    # overwrite any manually edited label files — confirm before flipping.
    stages.append('songs')

# Interactivity:
reload_saved = False    # re-load each saved NPZ for manual cross-checking
gui = True              # open interactive label editor per recording

# Processing:
env_rate = 96000.0      # envelope sampling rate in Hz
feat_rate = 96000.0     # feature sampling rate in Hz
sigmas = [0.001, 0.002, 0.004, 0.008, 0.016, 0.032]
types = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5,
         6, -6, 7, -7, 8, -8, 9, -9, 10, -10]
config = configuration(env_rate, feat_rate, types=types, sigmas=sigmas)
# Model overrides; feature thresholds are pre-computed kernel thresholds
# scaled by 0.2 (presumably tuned empirically — verify against pipeline):
config.update({
    'channel': None,
    'rate_ratio': None,
    'env_fcut': 250,
    'db_ref': 1,
    'inv_fcut': 10,
    'feat_thresh': np.load('../data/kernel_thresholds.npy') * 0.2,
    'feat_fcut': 0.5,
    'label_channels': np.array([0]),
    'label_thresh': 0.5,
})

## PREPARATION:

# Fetch WAV recording files:
input_paths = search_files(search_target, ext='wav', dir=input_folder)
path_names = crop_paths(input_paths)

# PROCESSING:

# Run processing pipeline:
for path, name in zip(input_paths, path_names):
    print('Processing:', name)

    # Fetch and store representations:
    save = None if output_folder is None else output_folder + f'{name}.npz'
    process_signal(config, stages, path, save=save, label_edit=gui)

    # Cross-control:
    if reload_saved:
        data, params = load_data(save, stages, ['songs'])
        embed()
print('Done.')
|
||||
87
python/save_inv_data_field.py
Normal file
87
python/save_inv_data_field.py
Normal file
@@ -0,0 +1,87 @@
|
||||
"""Compute stage-wise intensity measures for processed field recordings.

For each processed NPZ recording in the selected mode ('song' or
'noise'), re-runs the model pipeline and stores one intensity measure
per processing stage (temporal mean for 'feat', temporal std for all
other stages), restricted to the labeled song segment. For one
designated example file, the full stage snippets are saved as well.
"""
import numpy as np
from thunderhopper.modeltools import load_data, save_data
from thunderhopper.filetools import search_files, crop_paths
from thunderhopper.filtertools import find_kern_specs
from thunderhopper.model import process_signal
from IPython import embed

# GENERAL SETTINGS:
target = '*'
example_file = 'Pseudochorthippus_parallelus_micarray-short_JJ_20240815T160355-20240815T160755-1m10s690ms-1m13s614ms'
mode = ['song', 'noise'][1]
search_path = f'../data/field/processed/{mode}/'
data_paths = search_files(target, ext='npz', dir=search_path)
stages = ['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat']
save_path = f'../data/inv/field/{mode}/'

# ANALYSIS SETTINGS:
distances = np.load('../data/field/recording_distances.npy')

# SUBSET SETTINGS:
# Optional kernel subset as (type, sigma) pairs; overridden to None below,
# so the full kernel set is used unless this is re-enabled:
kernels = np.array([
    [1, 0.002],
    [-1, 0.002],
    [2, 0.004],
    [-2, 0.004],
    [3, 0.032],
    [-3, 0.032]
])
kernels = None
types = None#np.array([-1])
sigmas = None#np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032])

# EXECUTION:
for data_path, name in zip(data_paths, crop_paths(data_paths)):
    # Only the example recording gets full snippet data saved:
    save_detailed = example_file in name
    print(f'Processing {name}')

    # Get song recording (prior to anything):
    data, config = load_data(data_path, files='raw')
    song, rate = data['raw'], config['rate']

    # Reduce to kernel subset:
    if any(var is not None for var in [kernels, types, sigmas]):
        kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas)
        config['kernels'] = config['kernels'][:, kern_inds]
        config['k_specs'] = config['k_specs'][kern_inds, :]
        config['k_props'] = [config['k_props'][i] for i in kern_inds]
        config['feat_thresh'] = config['feat_thresh'][kern_inds]

    # Get song segment to be analyzed:
    # NOTE(review): 'songs_0' is assumed to hold a single (start, end)
    # label pair from the processed file — verify against save_field_data.
    time = np.arange(song.shape[0]) / rate
    start, end = data['songs_0'].ravel()
    segment = (time >= start) & (time <= end)

    # Prepare storage:
    measures = {}
    if save_detailed:
        snippets = {}

    # Process snippet:
    signals, rates = process_signal(config, returns=stages, signal=song, rate=rate)

    # Store results:
    for stage in stages:
        # Log intensity measures: temporal mean for features, temporal
        # standard deviation for all other stages.
        # NOTE(review): `segment` is indexed at the raw sampling rate;
        # this assumes all returned stages share that rate — confirm.
        mkey = f'measure_{stage}'
        if stage == 'feat':
            measures[mkey] = signals[stage][segment, ...].mean(axis=0)
        else:
            measures[mkey] = signals[stage][segment, ...].std(axis=0)

        # Log optional snippet data:
        if save_detailed:
            snippets[f'snip_{stage}'] = signals[stage]

    # Save analysis results:
    if save_path is not None:
        data = dict(
            distances=distances,
        )
        data.update(measures)
        if save_detailed:
            data.update(snippets)
        save_data(save_path + name, data, config, overwrite=True)

print('Done.')
embed()
|
||||
@@ -16,7 +16,7 @@ target_species = [
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
][0]
|
||||
][4]
|
||||
example_file = {
|
||||
'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms',
|
||||
'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms',
|
||||
|
||||
@@ -17,7 +17,7 @@ target_species = [
|
||||
'Gomphocerippus_rufus',
|
||||
'Omocestus_rufipes',
|
||||
'Pseudochorthippus_parallelus',
|
||||
][5]
|
||||
][6]
|
||||
example_file = {
|
||||
'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms',
|
||||
'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms',
|
||||
@@ -31,7 +31,7 @@ data_paths = search_files(target_species, dir='../data/processed/')
|
||||
noise_path = '../data/processed/white_noise_sd-1.npz'
|
||||
ref_path = '../data/inv/short/ref_measures.npz'
|
||||
pre_stages = ['filt', 'env']
|
||||
stages = pre_stages + ['conv', 'feat']
|
||||
stages = pre_stages + ['inv', 'conv', 'feat']
|
||||
save_path = '../data/inv/short/'
|
||||
|
||||
# ANALYSIS SETTINGS:
|
||||
@@ -98,6 +98,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
|
||||
measures = dict(
|
||||
measure_filt=np.zeros(shape_low, dtype=float),
|
||||
measure_env=np.zeros(shape_low, dtype=float),
|
||||
measure_inv=np.zeros(shape_low, dtype=float),
|
||||
measure_conv=np.zeros(shape_high, dtype=float),
|
||||
measure_feat=np.zeros(shape_high, dtype=float)
|
||||
)
|
||||
@@ -108,6 +109,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
|
||||
snippets = dict(
|
||||
snip_filt=np.zeros(shape_low, dtype=float),
|
||||
snip_env=np.zeros(shape_low, dtype=float),
|
||||
snip_inv=np.zeros(shape_low, dtype=float),
|
||||
snip_conv=np.zeros(shape_high, dtype=float),
|
||||
snip_feat=np.zeros(shape_high, dtype=float)
|
||||
)
|
||||
@@ -124,7 +126,9 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)):
|
||||
signal=scaled, rate=rate)
|
||||
|
||||
# Process mixture further:
|
||||
signals['conv'] = convolve_kernels(signals['env'], config['kernels'], config['k_specs'])
|
||||
signals['inv'] = sosfilter(signals['env'], rate, config['inv_fcut'], 'hp',
|
||||
padtype='constant', padlen=config['padlen'])
|
||||
signals['conv'] = convolve_kernels(signals['inv'], config['kernels'], config['k_specs'])
|
||||
signals['feat'] = sosfilter((signals['conv'] > config['feat_thresh']).astype(float),
|
||||
rate, config['feat_fcut'], 'lp',
|
||||
padtype='fixed', padlen=config['padlen'])
|
||||
|
||||
@@ -3,8 +3,13 @@ from color_functions import load_colors, shade_colors
|
||||
|
||||
# Settings:
|
||||
stages = ['conv', 'bi', 'feat']
|
||||
kern_types = np.array([1, -1, 2, -2, 3, -3, 4, -4])
|
||||
shade_factors = np.linspace(-0.6, 0.2, kern_types.size)
|
||||
mode = ['subset', 'all'][1]
|
||||
if mode == 'subset':
|
||||
kern_types = np.array([1, -1, 2, -2, 3, -3, 4, -4])
|
||||
shade_factors = np.linspace(-0.6, 0.2, kern_types.size)
|
||||
elif mode == 'all':
|
||||
kern_types = np.array([1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10])
|
||||
shade_factors = np.linspace(-0.6, 0.6, kern_types.size)
|
||||
|
||||
# Main colors:
|
||||
stage_colors = load_colors('../data/stage_colors.npz')
|
||||
@@ -15,4 +20,4 @@ for stage in stages:
|
||||
colors = {str(k): c for k, c in zip(kern_types, colors)}
|
||||
print(f'\n{stage} colors:')
|
||||
print(colors)
|
||||
np.savez(f'../data/{stage}_colors.npz', **colors)
|
||||
np.savez(f'../data/{stage}_colors_{mode}.npz', **colors)
|
||||
|
||||
@@ -16,7 +16,7 @@ stages = dict(
|
||||
log_hp=['filt', 'env', 'log', 'inv'],
|
||||
thresh_lp=['inv', 'conv', 'feat'],
|
||||
full=['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat'],
|
||||
short=['raw', 'filt', 'env', 'conv', 'feat']
|
||||
short=['raw', 'filt', 'env', 'inv', 'conv', 'feat']
|
||||
)[mode]
|
||||
|
||||
# PROCESSING:
|
||||
@@ -52,7 +52,9 @@ elif mode == 'full':
|
||||
data = process_signal(config, stages, signal=starter, rate=config['rate'])[0]
|
||||
elif mode == 'short':
|
||||
data = process_signal(config, ['raw', 'filt', 'env'], signal=starter, rate=config['rate'])[0]
|
||||
data['conv'] = convolve_kernels(data['env'], config['kernels'], config['k_specs'])
|
||||
data['inv'] = sosfilter(data['env'], config['env_rate'], config['inv_fcut'], 'hp',
|
||||
padtype='constant', padlen=config['padlen'])
|
||||
data['conv'] = convolve_kernels(data['inv'], config['kernels'], config['k_specs'])
|
||||
data['feat'] = sosfilter((data['conv'] > config['feat_thresh']).astype(float),
|
||||
config['env_rate'], config['feat_fcut'], 'lp',
|
||||
padtype='fixed', padlen=config['padlen'])
|
||||
|
||||
79
python/save_saturation_log-hp.py
Normal file
79
python/save_saturation_log-hp.py
Normal file
@@ -0,0 +1,79 @@
|
||||
import numpy as np
from thunderhopper.filetools import search_files
from thunderhopper.modeltools import load_data, save_data
from misc_functions import get_saturation
from IPython import embed

# Find the upper saturation point of the log-hp invariance measure per
# species song file, and optionally histogram the critical scales of all
# species on a shared set of bin edges.

# GENERAL SETTINGS:
target_species = [
    'Chorthippus_biguttulus',
    'Chorthippus_mollis',
    'Chrysochraon_dispar',
    'Euchorthippus_declivus',
    'Gomphocerippus_rufus',
    'Omocestus_rufipes',
    'Pseudochorthippus_parallelus',
]
search_path = '../data/inv/log_hp/collected/'
save_path = '../data/inv/log_hp/saturation/'

# ANALYSIS SETTINGS:
# Keyword arguments passed through to get_saturation() for plateau detection.
plateau_settings = dict(
    low=0.05,
    high=0.95,
    first=True,
    last=True,
    condense=None,
)
compute_hist = True     # If True, build a shared-bin histogram across species.
bins = 50               # Number of histogram bins.
pad = 0.05              # Fractional padding of the shared histogram range.

# PREPARATION:
if compute_hist:
    # Per-species extrema of critical scales, used to build shared edges later.
    min_scale, max_scale = [], []
archives = [{} for _ in target_species]

# EXECUTION:
for i, species in enumerate(target_species):
    print(f'Processing {species}')

    # Load accumulated invariance data. Guard against species without a
    # collected file (consistent with the collect script) instead of
    # crashing with an IndexError on [0]:
    paths = search_files(species, dir=search_path)
    if not paths:
        print(f'No collected data found for {species}, skipping.')
        continue
    data, config = load_data(paths[0], ['scales', 'measure_inv'])

    # Find upper saturation point per song file:
    crit_inds = np.array(get_saturation(data['measure_inv'], **plateau_settings)[1])
    crit_scales = data['scales'][crit_inds]

    # Output options:
    if not compute_hist:
        # Save species data immediately:
        archive = dict(crit_inds=crit_inds, crit_scales=crit_scales,
                       scales=data['scales'])
        save_data(save_path + species, archive, config, overwrite=True)
        continue

    # Log but don't save data yet:
    archives[i]['crit_inds'] = crit_inds
    archives[i]['crit_scales'] = crit_scales
    archives[i]['scales'] = data['scales']
    min_scale.append(crit_scales.min())
    max_scale.append(crit_scales.max())

# Optional histogram (only if at least one species yielded data):
if compute_hist and min_scale:
    # Generate shared histogram edges (lower edge clipped at zero):
    min_scale, max_scale = min(min_scale), max(max_scale)
    pad *= (max_scale - min_scale)
    edges = np.linspace(max(0, min_scale - pad), max_scale + pad, bins + 1)
    centers = edges[:-1] + np.diff(edges) / 2

    # Compute histogram and save species data:
    # NOTE(review): `config` here is the one returned for the last species
    # loaded above, and is saved for every species — presumably all collected
    # files share one config; verify against the collect pipeline.
    for species, archive in zip(target_species, archives):
        if not archive:
            continue  # Species was skipped above (no collected file).
        hist = np.histogram(archive['crit_scales'], bins=edges, density=True)[0]
        archive['hist'] = hist
        archive['bins'] = centers
        save_data(save_path + species, archive, config, overwrite=True)

print('Done.')
|
||||
Reference in New Issue
Block a user