'''
Copyright (c) 2019 Uber Technologies, Inc.
Licensed under the Uber Non-Commercial License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at the root directory of this project.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import print_function
from __future__ import division
import numpy as np
import argparse
import h5py
import os
import time
from ast import literal_eval
import sys
from general.util import mkdir_p
import plot_util as util
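
# Overview (a summary of the code below, with hedges where behavior is
# inferred): for each training iteration t and each parameter j, this script
# computes a per-parameter "helped" value
#
#     helped[t, j] = k[t, j] * (weights[t + 1, j] - weights[t, j])
#
# where k is either the gradient at the start of the iteration (--first_order)
# or a weighted average of the num_splits + 1 gradients saved within the
# iteration, using quadrature coefficients from util.get_rk_coeffs. Note that
# util.streamds and util.get_rk_coeffs come from this repo's plot_util module;
# their exact semantics are assumed here from how they are used below.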
def make_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('resdir', type=str, help='Path to dir containing weights and gradients file.')
    parser.add_argument('--custom_filename', type=str, default='', help='Filename if not gradients_adaptive')
    parser.add_argument('--chunk_size', type=int, default=200, help='chunk size (iterations)')
    parser.add_argument('--max_iters', type=int, default=9999999999, help='only do first n iterations')
    parser.add_argument('--first_order', action='store_true', help='calculate with first order instead of rk4')
    return parser
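
# Example invocation (a sketch; 'results/my_run' is a hypothetical path, and it
# must contain the HDF5 files 'weights' and 'gradients_adaptive' written by a
# training run, with the number of iterations divisible by --chunk_size):
#
#   python save_lca_stream.py results/my_run --chunk_size 100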
def stream_helped_rk_adaptive(weights, splits_per_iter, grads_train_list, grads_test_list, helped_train, helped_test, args):
    '''For each iteration, average the num_splits + 1 saved gradients with RK
    quadrature coefficients, multiply elementwise by the weight delta, and
    stream the result into helped_train / helped_test chunk by chunk.'''
    coeffs = {}
    for i in [2, 4, 8, 16, 32]:
        coeffs[i] = util.get_rk_coeffs(i)
    iters_to_calc = min(args.max_iters, weights.shape[0] - 1)
    weights_prev = weights[0]
    timerstart = time.time()
    list_ind = 0  # which np array in grads_train_list to use, from 0 to 3
    grads_ind = 0  # which index in grads_train_list[list_ind] we're currently at
    for chunk in range(0, iters_to_calc, args.chunk_size):
        buffer_helped_train = np.zeros((args.chunk_size, weights.shape[1]))
        buffer_helped_test = np.zeros((args.chunk_size, weights.shape[1]))
        for i in range(args.chunk_size):
            ts = chunk + i
            num_splits = splits_per_iter[ts]
            grads_train_iter = grads_train_list[list_ind][grads_ind:grads_ind + num_splits + 1]
            grads_test_iter = grads_test_list[list_ind][grads_ind:grads_ind + num_splits + 1]
            k_train = np.matmul(coeffs[num_splits], grads_train_iter) / coeffs[num_splits].sum()
            k_test = np.matmul(coeffs[num_splits], grads_test_iter) / coeffs[num_splits].sum()
            weights_next = weights[ts + 1]
            buffer_helped_train[i] = np.multiply(k_train, weights_next - weights_prev)
            buffer_helped_test[i] = np.multiply(k_test, weights_next - weights_prev)
            grads_ind += num_splits
            if grads_ind + 1 >= grads_train_list[list_ind].shape[0]:
                list_ind += 1
                grads_ind = 0
            weights_prev = weights_next
        # write every chunk_size
        helped_train[chunk:chunk + args.chunk_size] = buffer_helped_train
        helped_test[chunk:chunk + args.chunk_size] = buffer_helped_test
        print('Completed {}/{} iterations ({:.2f} s)'.format(
            ts + 1, iters_to_calc, time.time() - timerstart))
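
# A note on the bookkeeping above: each iteration consumes num_splits + 1
# gradients but advances grads_ind by only num_splits, because consecutive
# iterations share an endpoint gradient. When the current array is exhausted,
# we move to the next array in the list and reset grads_ind to 0 (the writer
# presumably starts each new grads_*_i dataset on a fresh iteration boundary).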
# used for side experiments
def stream_helped_first_order(weights, splits_per_iter, grads_train_list, grads_test_list, helped_train, helped_test, args):
    iters_to_calc = min(args.max_iters, weights.shape[0] - 1)
    weights_prev = weights[0]
    timerstart = time.time()
    list_ind = 0  # which np array in grads_train_list to use, from 0 to 3
    grads_ind = 0  # which index in grads_train_list[list_ind] we're currently at
    for chunk in range(0, iters_to_calc, args.chunk_size):
        buffer_helped_train = np.zeros((args.chunk_size, weights.shape[1]))
        buffer_helped_test = np.zeros((args.chunk_size, weights.shape[1]))
        for i in range(args.chunk_size):
            ts = chunk + i
            num_splits = splits_per_iter[ts]
            k_train = grads_train_list[list_ind][grads_ind]
            k_test = grads_test_list[list_ind][grads_ind]
            weights_next = weights[ts + 1]
            buffer_helped_train[i] = np.multiply(k_train, weights_next - weights_prev)
            buffer_helped_test[i] = np.multiply(k_test, weights_next - weights_prev)
            grads_ind += num_splits
            if grads_ind + 1 >= grads_train_list[list_ind].shape[0]:
                list_ind += 1
                grads_ind = 0
            weights_prev = weights_next
        # write every chunk_size
        helped_train[chunk:chunk + args.chunk_size] = buffer_helped_train
        helped_test[chunk:chunk + args.chunk_size] = buffer_helped_test
        print('Completed {}/{} iterations ({:.2f} s)'.format(
            ts + 1, iters_to_calc, time.time() - timerstart))
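
# The first-order variant approximates each step's loss change with only the
# gradient at the start of the iteration, but still advances grads_ind by
# num_splits so it stays aligned with the adaptively saved gradient stream.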
def get_streamds_list(hf, keyroot, chunk_size):
    grads_list = [util.streamds(hf['{}_0'.format(keyroot)], chunk_size)]
    for i in range(1, 4):
        nextkey = '{}_{}'.format(keyroot, i)
        if nextkey in hf.keys():
            grads_list.append(util.streamds(hf[nextkey], chunk_size))
    return grads_list
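
# The writer splits gradients across up to four datasets named grads_train_0
# through grads_train_3 (and likewise grads_test_*), presumably to bound the
# size of any single dataset; get_streamds_list wraps whichever ones exist.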
def main():
    parser = make_parser()
    args = parser.parse_args()

    # read weights
    hf_weights = h5py.File(args.resdir + '/weights', 'r')
    weights = util.streamds(hf_weights['all_weights'], args.chunk_size)
    assert min(args.max_iters, weights.shape[0] - 1) % args.chunk_size == 0, 'make chunk_size divide evenly into num iters'

    # read gradients
    gradients_filename = '/gradients_adaptive'
    if args.custom_filename:
        gradients_filename = '/' + args.custom_filename
    hf_gradients = h5py.File(args.resdir + gradients_filename, 'r')
    grads_train_list = get_streamds_list(hf_gradients, 'grads_train', args.chunk_size)
    grads_test_list = get_streamds_list(hf_gradients, 'grads_test', args.chunk_size)
    splits_per_iter = np.array(hf_gradients['num_splits'])

    # set up output
    suffix = ''
    if args.max_iters < weights.shape[0] - 1:
        suffix = '_{}iters'.format(args.max_iters)
    filename = '/helped_first_order' if args.first_order else '/helped'
    hf_helped = h5py.File(args.resdir + filename + suffix, 'w-')
    helped_train = hf_helped.create_dataset('helped', (min(args.max_iters, weights.shape[0] - 1), weights.shape[1]),
                                            dtype='f4', compression='gzip')
    helped_test = hf_helped.create_dataset('helped_test', (min(args.max_iters, weights.shape[0] - 1), weights.shape[1]),
                                           dtype='f4', compression='gzip')

    # calculate helped and write to file
    if args.first_order:
        stream_helped_first_order(weights, splits_per_iter, grads_train_list, grads_test_list, helped_train, helped_test, args)
    else:
        stream_helped_rk_adaptive(weights, splits_per_iter, grads_train_list, grads_test_list, helped_train, helped_test, args)

    hf_helped.close()
    hf_weights.close()
    hf_gradients.close()


if __name__ == '__main__':
    main()
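
# Reading the output back (a minimal sketch; the file and dataset names match
# what main() writes above, and the path is hypothetical):
#
#   import h5py
#   with h5py.File('results/my_run/helped', 'r') as hf:
#       helped_train = hf['helped'][:]        # shape (iters, num_params), float32
#       helped_test = hf['helped_test'][:]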