Merge pull request #43 from INM-6/master
New modules added, some module names updated, visualize convolved rate time series and bug fix
shimoura authored Oct 20, 2023
2 parents be361ff + 111512e commit db2ce11
Showing 8 changed files with 301 additions and 118 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -23,3 +23,5 @@ figures/Schmidt2018_dyn/.ipynb_checkpoints/*
multiarea_model/.ipynb_checkpoints/*

multiarea_model/data_multiarea/.ipynb_checkpoints/*

+tests/.ipynb_checkpoints/*
61 changes: 61 additions & 0 deletions figures/MAM2EBRAINS/M2E_compute_corrcoeff.py
@@ -0,0 +1,61 @@
import json
import numpy as np
import os

import correlation_toolbox.helper as ch
from multiarea_model import MultiAreaModel
import sys

"""
Compute correlation coefficients for a subsample
of neurons for the entire network from raw spike files of a given simulation.
"""

def compute_corrcoeff(data_path, label):
    load_path = os.path.join(data_path,
                             label,
                             'recordings')
    save_path = os.path.join(data_path,
                             label,
                             'Analysis')

    with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f:
        sim_params = json.load(f)
    T = sim_params['T']

    tmin = 500.
    subsample = 2000
    resolution = 1.

    """
    Create MultiAreaModel instance to have access to data structures
    """
    M = MultiAreaModel({})

    spike_data = {}
    cc_dict = {}
    for area in M.area_list:
        cc_dict[area] = {}
        LvR_list = []
        N = []
        for pop in M.structure[area]:
            fp = '-'.join((label,
                           'spikes',  # assumes that the default label for spike files was used
                           area,
                           pop))
            fn = '{}/{}.npy'.format(load_path, fp)
            # +1000 to ensure that we really have subsample non-silent neurons in the end
            spikes = np.load(fn)
            ids = np.unique(spikes[:, 0])
            dat = ch.sort_gdf_by_id(spikes, idmin=ids[0], idmax=ids[0]+subsample+1000)
            bins, hist = ch.instantaneous_spike_count(dat[1], resolution, tmin=tmin, tmax=T)
            rates = ch.strip_binned_spiketrains(hist)[:subsample]
            cc = np.corrcoef(rates)
            cc = np.extract(1-np.eye(cc[0].size), cc)
            cc[np.where(np.isnan(cc))] = 0.
            cc_dict[area][pop] = np.mean(cc)

    fn = os.path.join(save_path,
                      'corrcoeff.json')
    with open(fn, 'w') as f:
        json.dump(cc_dict, f)
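
The off-diagonal averaging above (np.corrcoef followed by np.extract with a 1 - np.eye mask) can be sanity-checked in isolation. A minimal standalone sketch, not part of this commit; it assumes only NumPy, and the array shapes are made up:

import numpy as np

# Mean pairwise correlation of 3 signals, excluding the diagonal.
rates = np.random.rand(3, 100)
cc = np.corrcoef(rates)                 # 3x3 symmetric matrix, ones on the diagonal
mask = 1 - np.eye(cc.shape[0])          # zero on the diagonal, one elsewhere
off_diag = np.extract(mask, cc)         # np.extract keeps entries where mask is nonzero
print(off_diag.size)                    # 6 off-diagonal entries
print(off_diag.mean())                  # population-averaged pairwise correlation
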
132 changes: 132 additions & 0 deletions figures/MAM2EBRAINS/M2E_compute_rate_time_series.py
@@ -0,0 +1,132 @@
import json
import neo
import numpy as np
import os
import quantities as pq

from multiarea_model.analysis_helpers import pop_rate_time_series
from elephant.statistics import instantaneous_rate
from multiarea_model import MultiAreaModel
import sys

"""
Compute time series of population-averaged spike rates for a given
area from raw spike files of a given simulation.
Implements three different methods:
- binned spike histograms on all neurons ('full')
- binned spike histograms on a subsample of 140 neurons ('subsample')
- spike histograms convolved with a Gaussian kernel of optimal width
  after Shimazaki et al. (2010) ('auto_kernel')
"""

# assert(len(sys.argv) == 5)

# data_path = sys.argv[1]
# label = sys.argv[2]
# area = sys.argv[3]
# method = sys.argv[4]

def compute_rate_time_series(M, data_path, label, area, method):
    assert(method in ['subsample', 'full', 'auto_kernel'])
    # subsample : subsample spike data to 140 neurons to match the Chu 2014 data
    # full : use spikes of all neurons and compute spike histogram with bin size 1 ms
    # auto_kernel : use spikes of all neurons and convolve with Gaussian
    # kernel of optimal width using the method of Shimazaki et al. (2010)
    # (see Methods section of the paper)

    load_path = os.path.join(data_path,
                             label,
                             'recordings')
    save_path = os.path.join(data_path,
                             label,
                             'Analysis',
                             'rate_time_series_{}'.format(method))
    try:
        os.mkdir(save_path)
    except FileExistsError:
        pass

    with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f:
        sim_params = json.load(f)
    # T = sim_params['T']
    T = sim_params['sim_params']['t_sim']

    """
    Create MultiAreaModel instance to have access to data structures
    """
    # M = MultiAreaModel({})

    time_series_list = []
    N_list = []
    for pop in M.structure[area]:
        fp = '-'.join((label,
                       'spikes',  # assumes that the default label for spike files was used
                       area,
                       pop))
        fn = '{}/{}.npy'.format(load_path, fp)
        # spike_data = np.load(fn)
        spike_data = np.load(fn, allow_pickle=True)
        spike_data = spike_data[np.logical_and(spike_data[:, 1] > 500.,
                                               spike_data[:, 1] <= T)]
        if method == 'subsample':
            all_gid = np.unique(spike_data[:, 0])
            N = int(np.round(140 * M.N[area][pop] / M.N[area]['total']))

            i = 0
            s = 0
            gid_list = []
            while s < N:
                rate = spike_data[:, 1][spike_data[:, 0] == all_gid[i]].size / (1e-3 * (T - 500.))
                if rate > 0.56:
                    gid_list.append(all_gid[i])
                    s += 1
                i += 1
            spike_data = spike_data[np.isin(spike_data[:, 0], gid_list)]
            kernel = 'binned_subsample'
        if method == 'full':
            N = M.N[area][pop]  # Assumes that all neurons were recorded
            kernel = 'binned'

        if method in ['subsample', 'full']:
            time_series = pop_rate_time_series(spike_data, N, 500., T,
                                               resolution=1.)

        if method == 'auto_kernel':
            # To reduce the computational load, the time series is only computed until 10500. ms
            T = 10500.
            N = M.N[area][pop]  # Assumes that all neurons were recorded
            st = neo.SpikeTrain(spike_data[:, 1] * pq.ms, t_stop=T*pq.ms)
            time_series = instantaneous_rate(st, 1.*pq.ms, t_start=500.*pq.ms, t_stop=T*pq.ms)
            time_series = np.array(time_series)[:, 0] / N

            kernel = 'auto'

        time_series_list.append(time_series)
        N_list.append(N)

        fp = '_'.join(('rate_time_series',
                       method,
                       area,
                       pop))
        np.save('{}/{}.npy'.format(save_path, fp), time_series)

    time_series_list = np.array(time_series_list)
    area_time_series = np.average(time_series_list, axis=0, weights=N_list)

    fp = '_'.join(('rate_time_series',
                   method,
                   area))
    np.save('{}/{}.npy'.format(save_path, fp), area_time_series)

    par = {'areas': M.area_list,
           'pops': 'complete',
           'kernel': kernel,
           'resolution': 1.,
           't_min': 500.,
           't_max': T}
    fp = '_'.join(('rate_time_series',
                   method,
                   'Parameters.json'))
    with open('{}/{}'.format(save_path, fp), 'w') as f:
        json.dump(par, f)
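
A minimal driver sketch for this module, mirroring how M2E_visualize_resting_state.py invokes the function below; data_path and label are placeholder values for a completed simulation run, not part of the commit:

from multiarea_model import MultiAreaModel
from M2E_compute_rate_time_series import compute_rate_time_series

data_path = './simulations'   # placeholder: your simulation output directory
label = 'test_simulation'     # placeholder: the label of a completed run

M = MultiAreaModel({})        # provides access to M.structure, M.N, M.area_list
for method in ('full', 'auto_kernel'):
    compute_rate_time_series(M, data_path, label, 'V1', method)
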
figures/MAM2EBRAINS/M2E_load_data.py
@@ -93,7 +93,7 @@ def load_and_create_data(M, A, raster_areas):
    - 'alpha_time_window' : time constant of the alpha function
    - 'rect_time_window' : width of the moving rectangular function
    """
-    A.create_rate_time_series()
+    # A.create_rate_time_series()
    # print("Computing rate time series done")


@@ -188,4 +188,5 @@ def load_and_create_data(M, A, raster_areas):
    # try:
    #     subprocess.run(['python3', './Schmidt2018_dyn/compute_bold_signal.py'])
    # except FileNotFoundError:
-    #     raise FileNotFoundError("Executing R failed. Did you install R?")
\ No newline at end of file
+    #     raise FileNotFoundError("Executing R failed. Did you install R?")
59 changes: 38 additions & 21 deletions figures/MAM2EBRAINS/M2E_visualize_resting_state.py
@@ -19,7 +19,9 @@
icolor = myred
ecolor = myblue

-from M2E_LOAD_DATA import load_and_create_data
+from M2E_load_data import load_and_create_data
+# from M2E_compute_corrcoeff import compute_corrcoeff
+from M2E_compute_rate_time_series import compute_rate_time_series

def set_boxplot_props(d):
    for i in range(len(d['boxes'])):
@@ -66,6 +68,17 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
    # load data
    load_and_create_data(M, A, raster_areas)

+    # compute correlation coefficients
+    # compute_corrcoeff(data_path, M.simulation.label)
+
+    # compute rate_time_series_full
+    for area in raster_areas:
+        compute_rate_time_series(M, data_path, M.simulation.label, area, 'full')
+
+    # compute rate_time_series_auto_kernel
+    for area in raster_areas:
+        compute_rate_time_series(M, data_path, M.simulation.label, area, 'auto_kernel')
+
    t_sim = M.simulation.params["t_sim"]

    """
@@ -75,7 +88,7 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
    nrows = 4
    ncols = 4
    # width = 7.0866
-    width = 12
+    width = 10
    panel_wh_ratio = 0.7 * (1. + np.sqrt(5)) / 2.  # golden ratio

    height = width / panel_wh_ratio * float(nrows) / ncols
@@ -115,6 +128,7 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
    for area in raster_areas:
        if area not in area_list:
            raise Exception("Error! Given raster areas are not in the complete area list; please input correct areas to display the raster plots.")
+
    areas = raster_areas

    labels = ['A', 'B', 'C']
@@ -188,8 +202,12 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
    # Create MultiAreaModel instance to have access to data structures
    # """
    # M = MultiAreaModel({})

    # spike data
    spike_data = A.spike_data
    label_spikes = M.simulation.label
    label = M.simulation.label

    # # spike data
    # spike_data = {}
    # for area in areas:
    #     spike_data[area] = {}
@@ -199,9 +217,6 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
    #                       'recordings',
    #                       '{}-spikes-{}-{}.npy'.format(label_spikes,
    #                                                    area, pop)))
-    spike_data = A.spike_data
-    label_spikes = M.simulation.label
-    label = M.simulation.label

    # stationary firing rates
    fn = os.path.join(data_path, label, 'Analysis', 'pop_rates.json')
@@ -211,23 +226,23 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
    # time series of firing rates
    rate_time_series = {}
    for area in areas:
-        # fn = os.path.join(data_path, label,
-        #                   'Analysis',
-        #                   'rate_time_series_full',
-        #                   'rate_time_series_full_{}.npy'.format(area))
-        fn = os.path.join(data_path, label,
-                          'Analysis',
-                          'rate_time_series-{}.npy'.format(area))
+        fn = os.path.join(data_path, label,
+                          'Analysis',
+                          'rate_time_series_full',
+                          'rate_time_series_full_{}.npy'.format(area))
+        # fn = os.path.join(data_path, label,
+        #                   'Analysis',
+        #                   'rate_time_series-{}.npy'.format(area))
        rate_time_series[area] = np.load(fn)

-    # # time series of firing rates convolved with a kernel
-    # rate_time_series_auto_kernel = {}
-    # for area in areas:
-    #     fn = os.path.join(data_path, label,
-    #                       'Analysis',
-    #                       'rate_time_series_auto_kernel',
-    #                       'rate_time_series_auto_kernel_{}.npy'.format(area))
-    #     rate_time_series_auto_kernel[area] = np.load(fn)
+    # time series of firing rates convolved with a kernel
+    rate_time_series_auto_kernel = {}
+    for area in areas:
+        fn = os.path.join(data_path, label,
+                          'Analysis',
+                          'rate_time_series_auto_kernel',
+                          'rate_time_series_auto_kernel_{}.npy'.format(area))
+        rate_time_series_auto_kernel[area] = np.load(fn)

    # local variance revised (LvR)
    fn = os.path.join(data_path, label, 'Analysis', 'pop_LvR.json')
@@ -445,7 +460,9 @@ def plot_resting_state(M, data_path, raster_areas=['V1', 'V2', 'FEF']):
            np.logical_and(time >= t_min, time < t_max))]
        ax[i].plot(time, binned_spikes, color=colors[0], label=area)
        # rate = rate_time_series_auto_kernel[area]
        # ax[i].plot(time, rate, color=colors[2], label=area)
        rate = rate_time_series_auto_kernel[area][np.where(
            np.logical_and(time >= t_min, time < t_max))]
        ax[i].plot(time, rate, color=colors[2], label=area)
        ax[i].set_xlim((t_min, t_max))

        ax[i].text(0.8, 0.7, area, transform=ax[i].transAxes)

