forked from markf6/pycorr_iceflow
-
Notifications
You must be signed in to change notification settings - Fork 0
/
pycorr_iceflow_v1.1.py
2439 lines (2201 loc) · 149 KB
/
pycorr_iceflow_v1.1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import numpy as np
from numpy.polynomial import polynomial # this is for the bilinear offset correction
from scipy.interpolate import RectBivariateSpline as RBS
import cv2
# old environments may have gdal and osr as separate installs - check here (gdc not used now?)
try:
import gdal
import gdalconst as gdc # constants for gdal - e.g. GA_ReadOnly, GA_Update ( http://www.gdal.org )
import osr
except:
from osgeo import gdal
from osgeo import gdalconst as gdc
from osgeo import osr
import os
import subprocess as sp
import string
import random
import sys
import time
import datetime as dt
import argparse
# gaussian_filter from the scipy.ndimage
from scipy.ndimage import gaussian_filter
from matplotlib.pyplot import get_cmap
import netCDF4
import re
# this is all for using the its-live land masks that are on S3
import boto3
import fiona
from shapely.geometry import Point, Polygon, shape
import pyproj # used to find lon,lat from img1 midpoint
def return_its_live_land_mask_URL(lat=None,lon=None):
    """Return the URL of the ITS_LIVE land-mask GeoTIFF covering (lat, lon).

    Opens the ITS_LIVE autorift parameter-zone shapefile on S3, finds the zone
    polygon containing the point, and returns that feature's 'land' property
    (the URL of the zone's land-mask tif).  The point should fall in exactly
    one zone; if it falls in none, indexing the empty match list raises
    IndexError.
    """
    with fiona.open('s3://its-live-data/autorift_parameters/v001/autorift_landice_0120m.shp') as its_live_zones:
        # materialize (polygon, feature) pairs so the remote dataset can be closed
        zone_features = [(shape(feat['geometry']), feat) for feat in its_live_zones]
    query_point = Point(lon, lat)
    # should only match 1 feature
    hits = [feat for poly, feat in zone_features if poly.contains(query_point)]
    return hits[0]['properties']['land']
# pycorr_iceflow_v1.1 drops (comments out) speed_ref correction and bilinear offset correction, leaving land mask based constant x and y offset correction
# it adds the ability to use the its-live cloud based land mask (requires web connection) rather than local mask file
# pycorr_iceflow_v1 started life as experimental_sentinel2_sc_pycorr_nc_oc_v5p8p1_py3.py on 4/21/2021
# added -lgo_mask_limit_land_offset so that by default land and ocean pixels would not be search limited - good for fast tidewater and surging slow over oceans
# v5p0 is an update to v4 that adds masks/offset correction for Alaska and eventually Greenland using glacier/land/ocean masks
# the point0 (p0) is to indicate that this isn't complete yet - will remove that designation when it works right...
# 5p1 got most of this done - first working lgo mask update - limited search box to 5 pixels for land.
# 5p2 added save of lgo mask to nc file, limited search box to 5 pixels for oceans as well as land.
# 5p3 added bilinear offset correction of i and j offsets for land pixels, which improved warping-caused signal;
# compression in nc file may have been added here or prototyped in 5p2-derived version earlier. Compression here has issues with gdal
# because of bottom-up writing of datasets
# 5p4 fixed GDAL bottom-up issue by writing data and y coordinates out to nc file in top-down, with compression.
# 5p5 just frees the memory from the hp images before it does the offset correction, so that the process memory footprint doesn't get any bigger
# 5p6 adds the possibility of use of BOTH an lgo mask and velocity mask for offset correction (called full_stationary for lack of a better term) - this is envisioned now ONLY FOR GREENLAND NOW - maybe more places later - ALSO REQUIRES new -Greenland flag set.
# 5p7 the changes to implement the goal of 5p6 (adding Greenland speed ref vv,vx,vy offset correction like that used in Antarctica) got complicated - so when I started on figuring out
# how to reproject the vx and vy components for mid-speed area offset correction, I created this new version number so I wouldn't step all over 5p6 code...
#
# 5p8 this now has Greenland offset correction, changes to output nc file (includes del_i and del_j, as well as bilinear offset correction fields, if they were used,
# and basic info in attributes about input files and original pixel/projection data)
# also fixed ref_velocity nodata issue - didn't make it through reprojection.
#
# psutil is to trace memory footprint, but is not on sc system, so remove these references there
try:
    import psutil
    psutil_available = True
    print('psutil found')

    def memory_usage_psutil():
        """Return this process's resident memory usage in MB."""
        process = psutil.Process(os.getpid())
        mem = process.memory_info()[0] / float(2 ** 20)
        return mem
# only a missing psutil should disable reporting; a bare except would also
# swallow SystemExit/KeyboardInterrupt and unrelated errors
except ImportError:
    psutil_available = False
    print('psutil not found, proceeding without psutil memory usage reports')
# try:
#     import resource
#     resource_available=True
#     print('resource found')
#     def memory_usage_resource():
#         # return the memory usage in MB
#         usage = resource.getrusage(resource.RUSAGE_SELF)
#         return('%7.3f local %7.3f stack %7.3f max memory'%(float(usage.ru_idrss)/float(2 ** 20),float(usage.ru_isrss)/float(2 ** 20),float(usage.ru_maxrss)/float(2 ** 20)))
# except:
#     resource_available=False
#     print('resource package not found, proceeding without resource memory usage reports')
resource_available = False  # resource-module reporting is disabled (see commented block above)
##########################################################################################
# GeoImg_noload
###############
#
# modified 9/7/2015 for LO8 fname -> date
# modified to noload for sc application - don't read image on setup - will read image data in main code to hp filter, delete...and keep memory footprint small
#
# modified 7/10/2017 for Collection 1 file names - recognize input filename as C1, parse first yyyyMMDD for acquisition date, set doy field...
#
# modified 4/4/2018 GeoImg_noload checks for successful file open, exits with status(0) but prints an error line if open fails
#
##########################################################################################
class GeoImg_noload:
    """Geocoded image wrapper that reads georeferencing info but NOT pixel data.

    a = GeoImg_noload(in_filename, in_dir='.', datestr=None, datefmt='%m/%d/%y')

    Pixel data are deliberately not read here (the main code reads them later
    via self.gd.ReadAsArray()) so arrays can be high-pass filtered and freed,
    keeping the process memory footprint small.  The acquisition datetime is
    parsed from the filename for Landsat (pre-collection and Collection 1) and
    Sentinel-2 (old and new naming) products, or taken from datestr/datefmt for
    any other GeoTIFF.  datefmt is a dt.datetime.strptime() format string.
    """
    # Collection 1 Landsat: LXSS_LLLL_PPPRRR_YYYYMMDD_yyymmdd_CC_TX, in which:
    # L = Landsat
    # X = Sensor
    # SS = Satellite
    # PPP = WRS path
    # RRR = WRS row
    # YYYYMMDD = Acquisition date
    # yyyymmdd = Processing date
    # CC = Collection number
    # TX = Collection category
    def __init__(self, in_filename, in_dir='.', datestr=None, datefmt='%m/%d/%y'):
        self.filename = in_filename
        self.in_dir_path = in_dir  # in_dir can be relative...
        self.in_dir_abs_path = os.path.abspath(in_dir)  # get absolute path for later ref if needed
        self.gd = gdal.Open(self.in_dir_path + '/' + self.filename)
        if not(self.gd):
            # NOTE: deliberately exits with status 0 (see module history note above) after printing the gdal error
            print('Error: open of %s failed: gdal_error: %s'%(self.in_dir_path + os.path.sep + self.filename, gdal.GetLastErrorMsg()))
            sys.exit(0)
        self.nodata_value = self.gd.GetRasterBand(1).GetNoDataValue()
        self.srs = osr.SpatialReference(wkt=self.gd.GetProjection())
        self.gt = self.gd.GetGeoTransform()
        self.proj = self.gd.GetProjection()
        self.intype = self.gd.GetDriver().ShortName
        # outer bounding box of the image in projection coordinates (pixel edges)
        self.min_x = self.gt[0]
        self.max_x = self.gt[0]+self.gd.RasterXSize*self.gt[1]
        self.min_y = self.gt[3]+self.gt[5]*self.gd.RasterYSize
        self.max_y = self.gt[3]
        self.pix_x_m = self.gt[1]
        self.pix_y_m = self.gt[5]  # typically negative (north-up images)
        self.num_pix_x = self.gd.RasterXSize
        self.num_pix_y = self.gd.RasterYSize
        # compact transform [ul_x, ul_y, pix_x, pix_y] used by imageij2XY/XY2imageij
        self.XYtfm = np.array([self.min_x,self.max_y,self.pix_x_m,self.pix_y_m]).astype('float')
        if (datestr is not None):  # date specified in GeoImg call directly - could be any GeoTiff...
            self.imagedatetime = dt.datetime.strptime(datestr,datefmt)
        elif (self.filename.count('_')>=7 and self.filename[0]=='L'):  # looks like collection 1 landsat
            b = self.filename.split('_')
            self.sensor = b[0]
            self.path = int(b[2][0:3])
            self.row = int(b[2][3:6])
            self.year = int(b[3][0:4])
            self.imagedatetime = dt.datetime.strptime(b[3],'%Y%m%d')
            self.doy = self.imagedatetime.timetuple().tm_yday
        elif ((self.filename.find('LC8') == 0) | (self.filename.find('LO8') == 0) | \
              (self.filename.find('LE7') == 0) | (self.filename.find('LT5') == 0) | \
              (self.filename.find('LT4') == 0)):  # looks landsat like (old filenames) - parse date from filename (contains day of year)
            self.sensor = self.filename[0:3]
            self.path = int(self.filename[3:6])
            self.row = int(self.filename[6:9])
            self.year = int(self.filename[9:13])
            self.doy = int(self.filename[13:16])
            self.imagedatetime = dt.datetime.fromordinal(dt.date(self.year-1,12,31).toordinal()+self.doy)
        elif ( (self.filename.find('S2A') == 0) | (self.filename.find('S2B') == 0) | \
               ((self.filename.find('T') == 0) & (self.filename.find('_') == 6)) ):  # looks like Sentinel-2 data (old or new format)
            if self.filename.find('S2') == 0:  # old format Sentinel 2 data
                self.sensor = self.filename[0:3]
                # raw strings so \d etc. are regex escapes, not (invalid) string escapes
                b = re.search(r'_(?P<date>\d{8})T(?P<time>\d{6})_T(?P<tile>[A-Z0-9]{5})_A(?P<orbit>\d{6})_R(?P<rel_orbit>\d{3})_',self.filename)
                self.path = np.mod(int(b.group('orbit')),143)+3  # why + 3? there is an offset between rel_orbit and absolute orbit numbers for S2A
                self.tile = b.group('tile')
                self.imagedatetime = dt.datetime.strptime(b.group('date'),'%Y%m%d')
            else:
                self.sensor = 'S2'  # would have to get S2A/S2B from full file path, which may not be maintained
                b = re.search(r'T(?P<tile>[A-Z0-9]{5})_(?P<date>\d{8})T(?P<time>\d{6})',self.filename)
                self.tile = b.group('tile')
                self.imagedatetime = dt.datetime.strptime(b.group('date'),'%Y%m%d')
        else:
            self.imagedatetime = None  # need to throw error in this case...or get it from metadata

    def imageij2XY(self, ai, aj, outx=None, outy=None):
        """Convert image (i, j) pixel indices to projection (X, Y) pixel-center coordinates."""
        it = np.nditer([ai,aj,outx,outy],
                       flags = ['external_loop', 'buffered'],
                       op_flags = [['readonly'],['readonly'],
                                   ['writeonly', 'allocate', 'no_broadcast'],
                                   ['writeonly', 'allocate', 'no_broadcast']])
        for ii,jj,ox,oy in it:
            # +0.5 puts the coordinate at the pixel center
            ox[...] = (self.XYtfm[0]+((ii+0.5)*self.XYtfm[2]))
            oy[...] = (self.XYtfm[1]+((jj+0.5)*self.XYtfm[3]))
        return np.array(it.operands[2:4])

    def XY2imageij(self, ax, ay, outi=None, outj=None):
        """Convert projection (X, Y) coordinates to fractional image (i, j) pixel indices."""
        it = np.nditer([ax,ay,outi,outj],
                       flags = ['external_loop', 'buffered'],
                       op_flags = [['readonly'],['readonly'],
                                   ['writeonly', 'allocate', 'no_broadcast'],
                                   ['writeonly', 'allocate', 'no_broadcast']])
        for xx,yy,oi,oj in it:
            oi[...] = ((xx-self.XYtfm[0])/self.XYtfm[2])-0.5  # if python arrays started at 1, + 0.5
            oj[...] = ((yy-self.XYtfm[1])/self.XYtfm[3])-0.5  # "    "      "       "     "
        return np.array(it.operands[2:4])
def output_float32array_as_geotiff(outarray, name_extension='vv', outdir='.', file_name_base='', nodata_value=-9999.0):
    """Write a 2-D float array to a single-band float32 GeoTIFF.

    Georeferencing and logging come from module-level globals set up by the
    main script (output_array_ul_corner, output_array_pix_x_m,
    output_array_pix_y_m, img1.proj, args.nlf, log_output_lines), so this must
    run after that setup.  The output file is {outdir}/{file_name_base}_{name_extension}.tif.
    """
    n_lines, n_pixels = outarray.shape  # raster size comes straight from the array dimensions
    n_bands = 1
    drv_name = "GTiff"
    gtiff_driver = gdal.GetDriverByName(drv_name)
    dst_filename = f'{outdir}/{file_name_base}_{name_extension}.tif'
    dst_ds = gtiff_driver.Create(dst_filename, n_pixels, n_lines, n_bands, gdal.GDT_Float32)
    msg = 'out image %s %s %d, %d, %d'%(dst_filename, drv_name, n_pixels, n_lines, n_bands)
    print(msg)
    if not args.nlf:
        log_output_lines.append(msg + '\n')
    # geotransform describes the output (chip-center) grid; pix_y_m is typically negative
    dst_ds.SetGeoTransform([output_array_ul_corner[0], output_array_pix_x_m, 0, output_array_ul_corner[1], 0, output_array_pix_y_m])
    dst_ds.SetProjection(img1.proj)
    dst_ds.GetRasterBand(1).WriteArray(outarray.astype('float32'))
    dst_ds.GetRasterBand(1).SetNoDataValue(np.double(nodata_value))
    dst_ds = None  # closing the dataset flushes it to disk
# timers for the wall-clock and process (CPU) elapsed-time stamps printed by t_log()
startproctime=time.perf_counter()
startwalltime=time.time()
t_line = "="*70  # separator line printed around each t_log() message
# output_log_file_fid=None # will be fid of output log file if the -nlf flag not thrown - then log to file as well as terminal.
log_output_lines=[]  # log lines accumulate here; written to the output nc file unless -nlf is raised
def t_log(s, outlogdisabled=False, elapsed=None):
    """Print a timestamped progress message and (unless outlogdisabled) append it to log_output_lines.

    s: message string; elapsed: optional pre-formatted elapsed-time value to print.
    Uses module globals t_line, startwalltime, startproctime, log_output_lines.
    NOTE: the printed line and the appended log line are built separately and
    differ slightly in spacing (print inserts a space between arguments).
    """
    print(t_line)
    print(time.strftime('UTC: %x_%X', time.gmtime()),' Wall:%8.3f'%(time.time()-startwalltime),' Proc:%8.3f'%(time.perf_counter()-startproctime), '-', s)
    if elapsed:
        print("Elapsed time: %s" % elapsed)
    print(t_line + '\n')
    if not(outlogdisabled):
        log_output_lines.append(time.strftime('UTC: %x_%X', time.gmtime()) + ' Wall:%8.3f'%(time.time()-startwalltime) + ' Proc:%8.3f'%(time.perf_counter()-startproctime) + ' - ' + s + '\n')
# from accepted answer and second answer here: http://stackoverflow.com/questions/7997152/python-3d-polynomial-surface-fit-order-dependent
def polyfit2d(x, y, f, deg):
    """Least-squares fit of a 2-D polynomial surface f(x, y).

    x, y, f : array-like sample coordinates and values (flattened internally).
    deg     : (deg_x, deg_y) maximum polynomial degree in each dimension.
    Returns the coefficient array c of shape (deg_x+1, deg_y+1), where
    c[i, j] multiplies x**i * y**j.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    f = np.asarray(f)
    deg = np.asarray(deg)
    vander = polynomial.polyvander2d(x, y, deg)
    vander = vander.reshape((-1, vander.shape[-1]))
    f = f.reshape((vander.shape[0],))
    # rcond=None selects the modern machine-precision cutoff and avoids the
    # FutureWarning raised when lstsq is called without an explicit rcond
    c = np.linalg.lstsq(vander, f, rcond=None)[0]
    return c.reshape(deg + 1)
# set up command line arguments
parser = argparse.ArgumentParser( \
description="""uses image to image correlation to detect offsets in surface features;
produces map of offsets in units of pixels and velocity in m/day or m/year""",
epilog='>> <<',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-imgdir',
action='store',
type=str,
default='.',
help='single source dir for both images [.]')
parser.add_argument('-img1dir',
action='store',
type=str,
default='.',
help='source dir for image 1 [.]')
parser.add_argument('-img2dir',
action='store',
type=str,
default='.',
help='source dir for image 2 [.]')
parser.add_argument('-output_dir',
action='store',
type=str,
default='.',
help='output dir [.]')
parser.add_argument('-img1datestr',
action='store',
type=str,
help='date string for image 1 [None - set from L8 filename]')
parser.add_argument('-img2datestr',
action='store',
type=str,
help='date string for image 2 [None - set from L8 filename]')
parser.add_argument('-datestrfmt',
action='store',
type=str,
default='%m/%d/%Y',
help='date string format for img1datestr and img2datestr [None - set from L8 filename] eg. %%m/%%d/%%Y - SEE: https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior')
parser.add_argument('img1_name',
action='store',
type=str,
help='image 1 filename')
parser.add_argument('img2_name',
action='store',
type=str,
help='image 2 filename')
parser.add_argument('-out_name_base',
action='store',
type=str,
default='temp_out',
help='output filename base')
parser.add_argument('-bbox',
action='store',
type=float,
metavar=('min_x','min_y','max_x','max_y'),
nargs=4,
help='bbox for feature tracking area in projection m - minx miny maxx maxy - defaults to entire common area between images')
# parser.add_argument('-plotvmin',
# action='store',
# type=float,
# default=0.0,
# help='min vel for colormap [%(default)d]')
parser.add_argument('-plotvmax',
action='store',
type=float,
default=15.0,
help='max vel for colormap [%(default)d]')
parser.add_argument('-trackvmax',
action='store',
type=float,
help='max vel that can be tracked (half_target_chip will be made larger if necessary, but if speed_ref is slow, that limit will be used...) [not set]')
parser.add_argument('-nodatavmax',
action='store',
type=float,
default=0.333,
help='max vel (m/d) that can be tracked if speedref file is used and has nodata at location (using trackvmax for this for large time separations impractical - think ocean pixels...) [%(default)f]')
parser.add_argument('-half_source_chip',
action='store',
default=10,
type=int,
help='half size of source (small) chip [%(default)d]')
parser.add_argument('-half_target_chip',
action='store',
default=20,
type=int,
help='half size of target (large or search) chip [%(default)d]')
parser.add_argument('-inc',
action='store',
type=int,
default=20,
help='inc(rement) or chip center grid spacing in pixels (must be even integer) [%(default)d]')
parser.add_argument('-gfilt_sigma',
action='store',
type=float,
default=3.0,
help='gaussian filter sigma (standard deviation in pixels for Gaussian kernel) [%(default)f]')
parser.add_argument('-dcam',
action='store',
type=float,
default=0.05,
help='min difference between corr peaks 1 and 2 for mask ( full mask statement: masked_where(((del_corr_arr<dcam)&(corr_arr<cam)) | (corr_arr<cam1)) ) [%(default)f]')
parser.add_argument('-cam',
action='store',
type=float,
default=1.0,
help='corr peak max value for dcam & cam mask (see full mask statement in -dcam help) [%(default)f]')
parser.add_argument('-cam1',
action='store',
type=float,
default=0.0,
help='corr peak value max for or mask (see full mask statement in -dcam help) [%(default)f]')
# parser.add_argument('-no_speed_ref',
# action='store_true',
# default=False,
# help='Do NOT use speed reference to set search chip sizes (and optionally to correct offsets) (use default max speed instead) [False if not raised]')
# parser.add_argument('-Greenland',
# action='store_true',
# default=False,
# help='this pair is in Greenland - needed so that vref vv mosaic is reprojected when input (unlike Antarctica) [False if not raised]')
# parser.add_argument('-speed_ref_dir',
# action='store',
# type=str,
# default='.',
# help='path for directory where speed_ref mosaic (speed in m/yr) is kept [%(default)s]')
# parser.add_argument('-speed_ref_filename',
# action='store',
# type=str,
# default='LISA_750m_vv.tif',
# help='name of speed_ref mosaic (speed in m/yr) [%(default)s]')
parser.add_argument('-lgo_mask_filename',
action='store',
type=str,
default=None,
help='file name for land_glacier_ocean mask file (used for lgo-mask-based offset correction) [%(default)s]')
parser.add_argument('-lgo_mask_file_dir',
action='store',
type=str,
default=None,
help='path to diretory containing land_glacier_ocean mask file (used for lgo-mask-based offset correction) [%(default)s]')
parser.add_argument('-use_itslive_land_mask_from_web',
action='store_true',
default=False,
help='pull a land mask from itslive.jpl.nasa.gov S3 bucket (will enable offset correction and disable local lgo_mask use) [False if not raised]')
parser.add_argument('-VRT_dir',
action='store',
type=str,
default='.',
help='path to directory where temporary .vrt files will be stored, then deleted, for access to part of speed_ref or lgo-mask mosaic [%(default)s]')
parser.add_argument('-max_allowable_pixel_offset_correction',
action='store',
type=float,
default=3.0,
help='if calculated offset correction of any type is larger than this many pixels, it will NOT be applied [%(default)f pixels]')
######################################
#
# the rest of the parameters are flags - if raised, set to true, otherwise false
#
######################################
parser.add_argument('-do_not_highpass_input_images',
action='store_true',
default=False,
help='DO NOT highpass input images before correlation - [highpass images before correlation]')
parser.add_argument('-v',
action='store_true',
default=False,
help='verbose - extra diagnostic and image info put in log file [False if not raised]')
parser.add_argument('-mpy',
action='store_true',
default=False,
help='meters per year - [meters per day if not raised]')
parser.add_argument('-log10',
action='store_true',
default=False,
help='output second rgba color image that is log10(v) (and second kmz with colorbar in addition to GTiff if requested) - [only linear color image produced]')
# parser.add_argument('-no_gtif',
# action='store_true',
# default=False,
# help='do not output default GTiff - [geoTiff always produced if not raised]')
parser.add_argument('-nlf',
action='store_true',
default=False,
help='do not output log entries in nc file that contain command line etc - [output log entries in nc file]')
parser.add_argument('-progupdates',
action='store_true',
default=False,
help='provide progress updates - [do not output progress updates]')
parser.add_argument('-output_geotiffs_instead_of_netCDF',
action='store_true',
default=False,
help='output GeoTIFFs of vx,vy,vv,del_corr instead of .nc file with layers (supports all GDAL projections, netCDF only supports UTM,PS) - [no]')
# parser.add_argument('-offset_correction_speedref',
# action='store_true',
# default=False,
# help='estimate offset at slow moving areas and correct output vels with constant vx and vy shifts (requires -speed_ref files (vv,vx,vy - vv specified)) - [False]')
parser.add_argument('-offset_correction_lgo_mask',
action='store_true',
default=False,
help='estimate offset from land pixels and make correct output vels with constant vx and vy shifts (requires -lgo_mask_file - [False]')
parser.add_argument('-lgo_mask_limit_land_offset',
action='store_true',
default=False,
help='when source chip center is a land pixel, limit search distance (requires -lgo_mask_file - [False]')
# parser.add_argument('-offset_correction_bilinear_fit',
# action='store_true',
# default=False,
# help='use planar corrections to both i and j offsets to correct misfit - currently ONLY for lgo land pixels... - [False]')
# parser.add_argument('-offset_correction_bilinear_fit_min_pixels',
# action='store',
# type=int,
# default=2000,
# help='min valid land pixels to use planar corrections to both i and j offsets to correct misfit - [%(default)d]')
args = parser.parse_args()
# output naming and command-line logging setup
outdir=args.output_dir
out_name_base=args.out_name_base
out_parent_dir='./'
out_file_dir='./'
file_name_base=out_name_base
if not(args.nlf):
    # record the command line and parsed args at the top of the log (stored in the nc file later)
    # outsf=open(outdir + '/' + file_name_base+"_log.txt",'w')
    # output_log_file_fid=outsf # this pointer to the log file fid used in t_log...
    log_output_lines.append('# log from %s\n'% sys.argv[0])
    log_output_lines.append(" ".join(sys.argv) + "\n")
    log_output_lines.append(str(args) + "\n")
t_log("Start Program",outlogdisabled=args.nlf)
if(args.v):
    print('#', args)
# set up source directories
if(args.imgdir != "."): # single source directory specified
    if((args.img1dir != ".") | (args.img2dir != ".")):
        sys.exit('Error: -imgdir (specifying a single source directory for both images) cannot be used with -img1dir or -img2dir')
    else:
        image1dir=args.imgdir
        image2dir=args.imgdir
else: # -imgdir not used - set directories to img1dir and img2dir
    image1dir=args.img1dir
    image2dir=args.img2dir
if psutil_available:
    print('psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
if resource_available:
    print('resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
format = "GTiff"
driver = gdal.GetDriverByName( format )
# int16 limits used for rescaling float hp images back to signed int16
int_limit_max = np.iinfo('int16').max # get max (and min) value for rescaling float hp before converting back to signed int16
int_limit_minp1 = np.iinfo('int16').min + 1
int_nodata_val = np.iinfo('int16').min
img1_names=args.img1_name.split('.')
hp1filename = args.img1_name
hp2filename = args.img2_name
if psutil_available:
    print('psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
if resource_available:
    print('resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
##########################################################################
# now read hp images in and generate mask for img1
##########################################################################
img1=GeoImg_noload(hp1filename,in_dir=image1dir,datestr=args.img1datestr,datefmt=args.datestrfmt)
if psutil_available:
    print('about to read hp img1 - using ',memory_usage_psutil())
if not(args.do_not_highpass_input_images):
    # high-pass = original minus gaussian low-pass; gaussian_filter writes the
    # low-pass result in place (output=lp_arr_1) to avoid an extra array
    t_log("making low pass and high pass images (set -do_not_highpass_input_images to disable this step)")
    inimg1=img1.gd.ReadAsArray().astype(np.float32)
    lp_arr_1=inimg1.copy()
    gaussian_filter(lp_arr_1, args.gfilt_sigma, output=lp_arr_1)
    hp_arr_1=inimg1 - lp_arr_1
    lp_arr_1=None  # free the low-pass array immediately to limit peak memory
    t_log("done with low pass and high pass image 1 - removed lp")
    # valid-data mask for img1: False where the ORIGINAL image is nodata (or 0 if no nodata value declared)
    img1_data_mask=np.ones_like(hp_arr_1, dtype=bool)
    # need to test for presence of nodata value - fixed in 5p2
    if img1.nodata_value:
        img1_data_mask[inimg1==np.float32(img1.nodata_value)]=False
    else:
        img1_data_mask[inimg1==0]=False
    if psutil_available:
        print('made mask - psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
    if resource_available:
        print('made mask - resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
    inimg1=None  # done with the unfiltered image
else:
    t_log("not filtering input images because -do_not_highpass_input_images is set")
    hp_arr_1=img1.gd.ReadAsArray().astype(np.float32)
    img1_data_mask=np.ones_like(hp_arr_1, dtype=bool)
    # need to test for presence of nodata value - fixed in 5p2
    if img1.nodata_value:
        img1_data_mask[hp_arr_1==np.float32(img1.nodata_value)]=False
    else:
        img1_data_mask[hp_arr_1==0]=False
    if psutil_available:
        print('made mask - psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
    if resource_available:
        print('made mask - resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
if psutil_available:
    print('read hp img1 - psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
if resource_available:
    print('read hp img1 - resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
# image 2: same high-pass treatment, but no data mask is built (only img1's mask is used)
img2=GeoImg_noload(hp2filename,in_dir=image2dir,datestr=args.img2datestr,datefmt=args.datestrfmt)
if not(args.do_not_highpass_input_images):
    # t_log("sentinel 2 data, so making low pass and high pass images")
    # file_name_base = file_name_base + '_hp_filt_%3.1f'%(args.gfilt_sigma)
    inimg2=img2.gd.ReadAsArray().astype(np.float32)
    lp_arr_2=inimg2.copy()
    gaussian_filter(lp_arr_2, args.gfilt_sigma, output=lp_arr_2)
    hp_arr_2=inimg2 - lp_arr_2
    lp_arr_2=None
    t_log("done with low pass and high pass image 2 - removed lp")
    inimg2=None
else:
    hp_arr_2=img2.gd.ReadAsArray().astype(np.float32)
if psutil_available:
    print('read hp img2 - psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
if resource_available:
    print('read hp img2 - resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
del_t=img2.imagedatetime - img1.imagedatetime
del_t_days=del_t.total_seconds()/(24.0*3600.0)
del_t_years=del_t_days/(365.25)
# switch from m/d to m/yr if -mpy
if (args.mpy):
del_t_unit_str='years'
del_t_speedunit_str='m/yr'
del_t_val=del_t_years
else:
del_t_unit_str='days'
del_t_speedunit_str='m/d'
del_t_val=del_t_days
# offset pixels to subtract from ij_1 to get ij_2 coordinates (how far the two image arrays are offset based only on their geolocation)
offset_2_1=img1.XY2imageij(*img2.imageij2XY(*[0,0]))
# either work in specified bounding box, or find largest box of common pixels between the two images and use that
if (args.bbox is None):
    # find largest common x,y using original XY (geo cartesian coordinates) box between images
    # image coordinates are for bounding box (outer edge of pixels)
    com_max_x=np.min([img1.max_x, img2.max_x])
    com_max_y=np.min([img1.max_y, img2.max_y])
    com_min_x=np.max([img1.min_x, img2.min_x])
    com_min_y=np.max([img1.min_y, img2.min_y])
else: # read area from command line specified bounding box (still XY etc.)
    print('specifying -bbox is not implemented in this version of sc_pycorr - must allow it to use common overlap area of the two images - exiting')
    sys.exit(-1)
    # NOTE(review): the four assignments below are unreachable (sys.exit above) - kept only
    # for when -bbox support is restored; see the to-do comment below about validating bbox
    com_max_x=args.bbox[2]
    com_max_y=args.bbox[3]
    com_min_x=args.bbox[0]
    com_min_y=args.bbox[1]
#############
#### should check that a bbox, if specified on the command line, fits within the common footprint of the two images - but so far we never specify a bbox on the command line
#### EITHER remove this option OR put in this check...
#############
bbox=[com_min_x, com_min_y, com_max_x, com_max_y]
# plotvmin and plotvmax - speeds for colorbar in color_based geotiffs... - ?maybe set to None for autoscaling?
# plotvmin=args.plotvmin
plotvmax=args.plotvmax
# plot_2_mult=args.plot_2_mult # vmax for second plot will be plot_2_mult * plotvmax - to show detail on low end...
half_source_chip=args.half_source_chip # must be < half_target_chip; not enforced here, but could be checked to catch errors (the trackvmax path below grows half_target_chip as needed)
half_target_chip=args.half_target_chip
# if trackvmax specified, make sure half_target_chip is large enough to support measuring that large an offset. This is set before the buffer around the grid is worked out, so that target chips remain in the images.
if args.trackvmax:
    if args.trackvmax > ((half_target_chip - half_source_chip) * img1.pix_x_m / del_t_days):
        # np.int was removed in NumPy 1.24 - use the builtin int (same truncation of the ceil'd float)
        half_target_chip = int(np.ceil(((args.trackvmax * del_t_days)/img1.pix_x_m) + half_source_chip))
        print('increased half_target_chip to %d to allow tracking up to speed -trackvmax %f'%(half_target_chip,args.trackvmax))
# maximum trackable speed (m/d) given the search margin between source and target chips
maxtrackablevel=((half_target_chip - half_source_chip) * img1.pix_x_m / del_t_days)
print('>>>> half_target_chip %d pixels - maximum trackable velocity %f m/d (unless speed_ref at that location is lower, then it is 1.5 * (speed_ref_vel/pix_size_x_m) * del_t_days + 5 pixels) <<<<'%(half_target_chip,maxtrackablevel))
print("time separation in days: %f years: %f - using %s"%(del_t_days,del_t_years,del_t_speedunit_str))
if not(args.nlf):
    log_output_lines.append('>>>> half_target_chip %d pixels - maximum trackable velocity %f m/d (unless speed_ref at that location is lower, then it is 1.5 * (speed_ref_vel/pix_size_x_m) * del_t_days + 5 pixels) <<<<'%(half_target_chip,maxtrackablevel))
    log_output_lines.append("time separation in days: %f years: %f - using %s\n"%(del_t_days,del_t_years,del_t_speedunit_str))
inc=args.inc
# inc must be even so the half-inc rim (used below for grid registration) is an integer number of pixels
if ((inc/2.0) != int(inc/2.0)):
    print('-inc must be an even integer number of pixels for output geolocation to register properly; currently inc=%f'%(inc))
    sys.exit(-1)
###############################
# sub_pixel interpolation setup
###############################
rbs_halfsize=3 # size of peak area used for spline for subpixel peak loc
rbs_order=3 # polynomial order for subpixel rbs interpolation of peak location
d2xdx2_spacing=0.05 # pixels +- for calculating curvature of spline surface at peak correlation...
# use this box to find overlap range in i and j (array coordinates) for both images
# (+0.5/-0.5 shift from pixel edges to pixel centers of the overlap's corner pixels)
com_ul_i_j_img1=img1.XY2imageij(*(com_min_x,com_max_y))+0.5
com_ul_i_j_img2=img2.XY2imageij(*(com_min_x,com_max_y))+0.5
com_lr_i_j_img1=img1.XY2imageij(*(com_max_x,com_min_y))-0.5
com_lr_i_j_img2=img2.XY2imageij(*(com_max_x,com_min_y))-0.5
# image pixels in bbox for img1 as a slice are img1.img[c1_minj:c1_maxjp1,c1_mini:c1_maxip1]
# remember (Mark) that a python slice [3:5] returns elements 3 and 4, indexed from 0
c1_minj=com_ul_i_j_img1[1]
c1_maxjp1=(com_lr_i_j_img1[1]+1) # may be one more than the max index, for use in slices (p1 means plus 1...)
c1_mini=com_ul_i_j_img1[0]
c1_maxip1=(com_lr_i_j_img1[0]+1) # same
c2_minj=com_ul_i_j_img2[1]
c2_maxjp1=(com_lr_i_j_img2[1]+1) # same
c2_mini=com_ul_i_j_img2[0]
c2_maxip1=(com_lr_i_j_img2[0]+1) # same
com_num_pix_i=c1_maxip1-c1_mini # number of pixels per line in common (overlap) area
com_num_pix_j=c1_maxjp1-c1_minj # number of lines in common (overlap) area
if(args.v):
    print('numpix %f numlines %f in box'%(com_num_pix_i,com_num_pix_j))
    print('ul X %f Y %f of box'%(com_min_x,com_max_y))
    print('ul image1 i %f j%f image2 i %f j%f'%(c1_mini,c1_minj,c2_mini,c2_minj))
    print('lr X %f Y %f of box'%(com_max_x,com_min_y))
    print('lr image1 i %f j%f image2 i %f j%f'%(c1_maxip1,c1_maxjp1,c2_maxip1,c2_maxjp1))
half_inc_rim=int(inc/2.0) # inc was checked to be even (above) so int is redundant but included for clarity - want integer-width rim...
num_grid_i=int(com_num_pix_i/inc) # these are integer divisions...
num_grid_j=int(com_num_pix_j/inc)
# chip-center grid corner indices in each image (ul = upper left, lr = lower right)
ul_i_1_chip_grid=int(c1_mini + half_inc_rim)
ul_j_1_chip_grid=int(c1_minj + half_inc_rim)
lr_i_1_chip_grid=int(c1_mini + half_inc_rim + ((num_grid_i-1) * inc))
lr_j_1_chip_grid=int(c1_minj + half_inc_rim + ((num_grid_j-1) * inc))
ul_i_2_chip_grid=int(c2_mini + half_inc_rim)
ul_j_2_chip_grid=int(c2_minj + half_inc_rim)
lr_i_2_chip_grid=int(c2_mini + half_inc_rim + ((num_grid_i-1) * inc))
lr_j_2_chip_grid=int(c2_minj + half_inc_rim + ((num_grid_j-1) * inc))
r_i_1=range(ul_i_1_chip_grid,lr_i_1_chip_grid+1,inc) # range over common area
r_j_1=range(ul_j_1_chip_grid,lr_j_1_chip_grid+1,inc)
r_i_2=range(ul_i_2_chip_grid,lr_i_2_chip_grid+1,inc) # range over common area
r_j_2=range(ul_j_2_chip_grid,lr_j_2_chip_grid+1,inc)
# find required size of buffer around chip center positions so that target chip centered there remains in bbox
##### Note that this does not take advantage of a rim of pixels in image 2 that are outside the common pixels in the two images - this could be fixed
# not sure that last comment is true any more - should be ok now.
min_ind_i_1=np.min(np.nonzero(np.array(r_i_1)>half_target_chip))# this is width of the rim of zeros on left or top in the correlation arrays
min_ind_j_1=np.min(np.nonzero(np.array(r_j_1)>half_target_chip))# this is width of the rim of zeros on left or top in the correlation arrays
min_ind_i_2=np.min(np.nonzero(np.array(r_i_2)>half_target_chip))# this is width of the rim of zeros on left or top in the correlation arrays
min_ind_j_2=np.min(np.nonzero(np.array(r_j_2)>half_target_chip))# this is width of the rim of zeros on left or top in the correlation arrays
min_ind_i = np.max([min_ind_i_1,min_ind_i_2])
min_ind_j = np.max([min_ind_j_1,min_ind_j_2])
max_ind_i_1=np.max(np.nonzero(half_target_chip<=(img1.num_pix_x - np.array(r_i_1)))) # maximum allowed i index (buffer right edge of grid to accomodate target chip size)
max_ind_j_1=np.max(np.nonzero(half_target_chip<=(img1.num_pix_y - np.array(r_j_1)))) # maximum allowed i index (buffer bottom edge of grid to accomodate target chip size)
max_ind_i_2=np.max(np.nonzero(half_target_chip<=(img2.num_pix_x - np.array(r_i_2)))) # maximum allowed i index (buffer right edge of grid to accomodate target chip size)
max_ind_j_2=np.max(np.nonzero(half_target_chip<=(img2.num_pix_y - np.array(r_j_2)))) # maximum allowed i index (buffer bottom edge of grid to accomodate target chip size)
max_ind_i=np.min([max_ind_i_1,max_ind_i_2]) # maximum allowed i index (buffer right edge of grid to accomodate target chip size)
max_ind_j=np.min([max_ind_j_1,max_ind_j_2]) # maximum allowed i index (buffer bottom edge of grid to accomodate target chip size)
peak_block_halfsize=3 # this is the size of the area zeroed out before looking for the second highest peak
# the following assumes square chips - which we use here so far...
cent_loc=half_target_chip-half_source_chip # Note cent_loc,cent_loc is array i,j of chip center because indexing is zero based
# the -0.5 in what follows shifts from the ij of the element to it's upper left corner (chip center)
r_grid=np.meshgrid(np.array(r_i_1)-0.5,np.array(r_j_1)-0.5,indexing='xy')
chip_center_grid_xy=img1.imageij2XY(*r_grid)
# note output_array_ul_corner will be the same as [com_min_x, com_max_y] but is derived here just to check coordinates
output_array_ul_corner=chip_center_grid_xy[:,0,0]-((inc/2.0)*img1.pix_x_m,(inc/2.0)*img1.pix_y_m)
output_array_pix_x_m=inc*img1.pix_x_m
output_array_pix_y_m=inc*img1.pix_y_m # note this will nearly always be negative, for image stored top line first, just as in world file
# use len() rather than calling __len__() directly (idiomatic, identical result)
output_array_num_pix_x=len(r_i_2)
output_array_num_pix_y=len(r_j_2)
vel_nodata_val=np.float32(-9999.0)
corr_nodata_val=np.float32(-2.0)
curvature_nodata_value=np.float32(-999.0)
#################################
# set up (allocate) output arrays
#################################
# np.full writes the fill value directly instead of allocating a ones array and
# multiplying (same values and dtype, one less temporary per array)
corr_arr=np.full((output_array_num_pix_y,output_array_num_pix_x), corr_nodata_val, dtype=np.float32) # NOTE: chips not used (image nodata areas) will have this value...
del_corr_arr=np.full((output_array_num_pix_y,output_array_num_pix_x), corr_nodata_val, dtype=np.float32)
offset_dist_ij_arr=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
sp_dist_i=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
sp_dist_j=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
d2idx2=np.full((output_array_num_pix_y,output_array_num_pix_x), curvature_nodata_value, dtype=np.float32)
d2jdx2=np.full((output_array_num_pix_y,output_array_num_pix_x), curvature_nodata_value, dtype=np.float32)
d2dx2_mean=np.full((output_array_num_pix_y,output_array_num_pix_x), curvature_nodata_value, dtype=np.float32)
del_i=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
del_j=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
orig_int_del_i=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
orig_int_del_j=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.float32)
count_tenths=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.int16)
count_hundredths=np.zeros((output_array_num_pix_y,output_array_num_pix_x), dtype=np.int16)
# report memory footprint after the big allocations, with whichever monitor is available
if psutil_available:
    print('set up output arrays - psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
if resource_available:
    print('set up output arrays - resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
# following section commented out for pycorr_iceflow_v1.1
# if not(args.no_speed_ref):
# # set up vrt file to sample reference speed mosaic at chip centers - this will be used to set search chip sizes for each point
# # if args.offset_correction_speedref is True, then read vx and vy reference data as well, for offset correction
# ulx=output_array_ul_corner[0]
# uly=output_array_ul_corner[1]
# lrx=output_array_ul_corner[0] + (output_array_num_pix_x * output_array_pix_x_m)
# lry=output_array_ul_corner[1] + (output_array_num_pix_y * output_array_pix_y_m)
#
# path_to_speed_ref_vv = args.speed_ref_dir+ '/' + args.speed_ref_filename
# output_vv_vrt_name= 'tmp' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + 'vv.vrt'
#
#
#
# if not(args.Greenland): # ANTARCTICA - this is in Antarctica, where Landsat images and vref mosaic are in the SAME projection!
# # -te needs xmin ymin xmax ymax - that is what is below, using the corners
# sp.call(["gdalbuildvrt", "-te", "%f"%ulx, "%f"%lry, "%f"%lrx, "%f"%uly, "-tr", "%9.4f"%(output_array_pix_x_m), "%9.4f"%(output_array_pix_x_m), "%s"%(args.VRT_dir + '/' + output_vv_vrt_name), path_to_speed_ref_vv]) # import subprocess as sp
# speedref_geoimg_vv=GeoImg_noload(output_vv_vrt_name,in_dir=args.VRT_dir)
# speedref_vel_vv=speedref_geoimg_vv.gd.ReadAsArray().astype(np.float32)
# if speedref_geoimg_vv.nodata_value:
# speedref_vv_nodata=speedref_geoimg_vv.nodata_value
# else:
# speedref_vv_nodata=None
# os.remove(args.VRT_dir + '/' + output_vv_vrt_name) # get rid of the temporary vrt file now
# print('got speedref vel data')
# if not(args.nlf):
# log_output_lines.append('# using speed reference (speedref file: %s) from speedref vrt file: %s\n'%(args.speed_ref_filename,output_vv_vrt_name))
#
# if args.offset_correction_speedref: # need to read vx and vy ref data as well...
# path_to_speed_ref_vx = args.speed_ref_dir+ '/' + args.speed_ref_filename.replace('vv','vx') # MARIN need to fix this...
# path_to_speed_ref_vy = args.speed_ref_dir+ '/' + args.speed_ref_filename.replace('vv','vy') # MARIN need to fix this...
# output_vx_vrt_name= 'tmp' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + 'vx.vrt'
# output_vy_vrt_name= 'tmp' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + 'vy.vrt'
# sp.call(["gdalbuildvrt", "-te", "%f"%ulx, "%f"%lry, "%f"%lrx, "%f"%uly, "-tr", "%9.4f"%(output_array_pix_x_m), "%9.4f"%(output_array_pix_x_m), "%s"%(args.VRT_dir + '/' + output_vx_vrt_name), path_to_speed_ref_vx]) # import subprocess as sp
# sp.call(["gdalbuildvrt", "-te", "%f"%ulx, "%f"%lry, "%f"%lrx, "%f"%uly, "-tr", "%9.4f"%(output_array_pix_x_m), "%9.4f"%(output_array_pix_x_m), "%s"%(args.VRT_dir + '/' + output_vy_vrt_name), path_to_speed_ref_vy]) # import subprocess as sp
# speedref_geoimg_vx=GeoImg_noload(output_vx_vrt_name,in_dir=args.VRT_dir)
# speedref_vel_vx=speedref_geoimg_vx.gd.ReadAsArray().astype(np.float32)
# if speedref_geoimg_vx.nodata_value:
# speedref_vx_nodata=speedref_geoimg_vx.nodata_value
# else:
# speedref_vx_nodata=None
#
# speedref_geoimg_vy=GeoImg_noload(output_vy_vrt_name,in_dir=args.VRT_dir)
# speedref_vel_vy=speedref_geoimg_vy.gd.ReadAsArray().astype(np.float32)
# if speedref_geoimg_vy.nodata_value:
# speedref_vy_nodata=speedref_geoimg_vy.nodata_value
# else:
# speedref_vy_nodata=None
# os.remove(args.VRT_dir + '/' + output_vx_vrt_name) # get rid of the temporary vrt file now
# os.remove(args.VRT_dir + '/' + output_vy_vrt_name) # get rid of the temporary vrt file now
# if not(args.nlf):
# log_output_lines.append('# using speed reference vx and vy data as well\n')
#
# else: # this is in GREENLAND, where Landsat images and vref mosaic are in DIFFERENT PROJECTIONS and so vref components must be reprojected!
#
# # first - open the vv mosaic but don't read it in - only want the srs from this file so we know it's projection - will read subregion at output resolution with .vrt
# tmp_speed_ref_orig=GeoImg_noload(args.speed_ref_filename,args.speed_ref_dir)
# source = img1.srs # the local UTM of the input image
# target = tmp_speed_ref_orig.srs
# # set up transform from local utm to PS of vel ref image, to calculate the corners of the vref mosaic covering this area
# transform_utm_to_PS = osr.CoordinateTransformation(source, target)
# # set up inv transform to that above to project the endpoints of each PS vx,vy vector to find local vref vy and vy in utm reference frame
# inv_transform_PS_to_utm = osr.CoordinateTransformation(target, source)
#
# # set up to sample speed_ref mz at chip centers - this will be used to find mask value at each point
# # in L8 image local UTM, these are:
# utm_ulx=output_array_ul_corner[0]
# utm_uly=output_array_ul_corner[1]
# # lrx=output_array_ul_corner[0] + (r_i_2.__len__() * inc * img1.pix_x_m)
# # lry=output_array_ul_corner[1] + (r_j_2.__len__() * inc * img1.pix_y_m)
# utm_lrx=output_array_ul_corner[0] + (output_array_num_pix_x * output_array_pix_x_m)
# utm_lry=output_array_ul_corner[1] + (output_array_num_pix_y * output_array_pix_y_m)
#
# #########################
# # now we project output array corners from local UTM back to projection of speed_ref mz
# (tulx, tuly, tulz ) = transform_utm_to_PS.TransformPoint( utm_ulx, utm_uly, 0.0 )
# (turx, tury, turz ) = transform_utm_to_PS.TransformPoint( utm_lrx, utm_uly, 0.0 )
# (tlrx, tlry, tlrz ) = transform_utm_to_PS.TransformPoint( utm_lrx, utm_lry, 0.0 )
# (tllx, tlly, tllz ) = transform_utm_to_PS.TransformPoint( utm_ulx, utm_lry, 0.0 )
# #####################
# # Make .vrt for speed_ref image that is in local UTM to match Band 8 images
# # - will reproject speed_ref img in memory to local utm after reading in just this part of data - .vrt limits the read size...
# #####################
# ulx=np.min([tulx,tllx])
# uly=np.max([tuly,tury])
# lrx=np.max([turx,tlrx])
# lry=np.min([tlly,tlry])
#
# sp.call(["gdalbuildvrt", "-te", "%f"%ulx, "%f"%lry, "%f"%lrx, "%f"%uly, "-tr", "%9.4f"%(output_array_pix_x_m), "%9.4f"%(output_array_pix_x_m), "%s"%(args.VRT_dir + '/' + output_vv_vrt_name), path_to_speed_ref_vv]) # import subprocess as sp
# speed_ref_vv_cropped_geoimg=GeoImg_noload(output_vv_vrt_name,in_dir=args.VRT_dir)
# # AM I CORRECT (11/3/16)? that this next read served no purpose? commenting it out here to see...
# # speed_ref_vv_cropped_mask=speed_ref_vv_cropped_geoimg.gd.ReadAsArray().astype(np.float32)
# if speed_ref_vv_cropped_geoimg.nodata_value:
# speedref_vv_nodata=speed_ref_vv_cropped_geoimg.nodata_value
# else:
# speedref_vv_nodata=None
# tmp_speed_ref_orig=None # close GeoImg_noload object used on full original lgo_mask file, used to get srs above, but no input of data
#
# #####################
# # now reproject cropped input mask into local UTM so speed_ref_vv can be used to identify
# # pixels that shouldn't move
# #####################
# mem_drv = gdal.GetDriverByName( 'MEM' )
# # inmem_lgo_mask_utm=mem_drv.CreateCopy('',in_vx.gd) # actually don't need the data, but the rest is good
# inmem_speed_ref_vv_utm = mem_drv.Create('', output_array_num_pix_x, output_array_num_pix_y, 1, gdal.GDT_Float32)
# inmem_speed_ref_vv_utm.SetGeoTransform( [ output_array_ul_corner[0], output_array_pix_x_m, 0, output_array_ul_corner[1], 0, output_array_pix_y_m ] ) #
# inmem_speed_ref_vv_utm.SetProjection ( img1.proj )
# #############################################
# # following is fix for mem driver not copying nodata pixels - set nodata value and fill band before reprojecting
# # FROM: https://trac.osgeo.org/gdal/ticket/6404 or stackexchange links found there
# #############################################
# rb1=inmem_speed_ref_vv_utm.GetRasterBand(1)
# rb1.SetNoDataValue(speedref_vv_nodata)
# rb1.Fill(speedref_vv_nodata)
#
# res = gdal.ReprojectImage( speed_ref_vv_cropped_geoimg.gd, inmem_speed_ref_vv_utm, speed_ref_vv_cropped_geoimg.srs.ExportToWkt(), img1.srs.ExportToWkt(), gdal.GRA_NearestNeighbour )
# speedref_vel_vv=inmem_speed_ref_vv_utm.ReadAsArray().astype(np.float32)
#
#
# print('got speed_ref_vv data and reprojected to original image local utm')
# if not(args.nlf):
# log_output_lines.append('# got speed_ref_vv data and reprojected to original image local utm')
# log_output_lines.append('# speed_ref_vv reference (speed_ref_vv mask file: %s) from speed_ref_vv vrt file: %s\n'%(path_to_speed_ref_vv,output_vv_vrt_name))
#
# if args.offset_correction_speedref: # need to read and reproject vx and vy ref data as well...
# path_to_speed_ref_vx = args.speed_ref_dir+ '/' + args.speed_ref_filename.replace('vv','vx')
# path_to_speed_ref_vy = args.speed_ref_dir+ '/' + args.speed_ref_filename.replace('vv','vy')
# output_vx_vrt_name= 'tmp' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + 'vx.vrt'
# output_vy_vrt_name= 'tmp' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + 'vy.vrt'
# sp.call(["gdalbuildvrt", "-te", "%f"%ulx, "%f"%lry, "%f"%lrx, "%f"%uly, "-tr", "%9.4f"%(output_array_pix_x_m), "%9.4f"%(output_array_pix_x_m), "%s"%(args.VRT_dir + '/' + output_vx_vrt_name), path_to_speed_ref_vx]) # import subprocess as sp
# sp.call(["gdalbuildvrt", "-te", "%f"%ulx, "%f"%lry, "%f"%lrx, "%f"%uly, "-tr", "%9.4f"%(output_array_pix_x_m), "%9.4f"%(output_array_pix_x_m), "%s"%(args.VRT_dir + '/' + output_vy_vrt_name), path_to_speed_ref_vy]) # import subprocess as sp
# speed_ref_vx_cropped_geoimg=GeoImg_noload(output_vx_vrt_name,in_dir=args.VRT_dir)
# speed_ref_vy_cropped_geoimg=GeoImg_noload(output_vy_vrt_name,in_dir=args.VRT_dir)
# # reproject PS vx value into utm before converting to utm vx value
# inmem_speed_ref_vx_utm = mem_drv.Create('', output_array_num_pix_x, output_array_num_pix_y, 1, gdal.GDT_Float32)
# inmem_speed_ref_vx_utm.SetGeoTransform( [ output_array_ul_corner[0], output_array_pix_x_m, 0, output_array_ul_corner[1], 0, output_array_pix_y_m ] ) #
# inmem_speed_ref_vx_utm.SetProjection ( img1.proj )
# res = gdal.ReprojectImage( speed_ref_vx_cropped_geoimg.gd, inmem_speed_ref_vx_utm, speed_ref_vx_cropped_geoimg.srs.ExportToWkt(), img1.srs.ExportToWkt(), gdal.GRA_NearestNeighbour )
# speedref_vel_PS_vx_in_utm=inmem_speed_ref_vv_utm.ReadAsArray().astype(np.float32)
# # reproject PS vy value into utm before converting to utm vy value
# inmem_speed_ref_vy_utm = mem_drv.Create('', output_array_num_pix_x, output_array_num_pix_y, 1, gdal.GDT_Float32)
# inmem_speed_ref_vy_utm.SetGeoTransform( [ output_array_ul_corner[0], output_array_pix_x_m, 0, output_array_ul_corner[1], 0, output_array_pix_y_m ] ) #
# inmem_speed_ref_vy_utm.SetProjection ( img1.proj )
# res = gdal.ReprojectImage( speed_ref_vy_cropped_geoimg.gd, inmem_speed_ref_vy_utm, speed_ref_vy_cropped_geoimg.srs.ExportToWkt(), img1.srs.ExportToWkt(), gdal.GRA_NearestNeighbour )
# speedref_vel_PS_vy_in_utm=inmem_speed_ref_vv_utm.ReadAsArray().astype(np.float32)
# # will convert the PSvx,PSvy components (at local utm centers already) to local utmvx,utmvy components to compare with feature tracking of local utm Landsat...
# if speedref_vv_nodata:
# speedref_vel_vx=np.ones_like(speedref_vel_vv) * speedref_vv_nodata
# speedref_vel_vy=np.ones_like(speedref_vel_vv) * speedref_vv_nodata
# pts=np.where(speedref_vel_vv!=speedref_vv_nodata) # indices of points to reproject vx and vy
# else:
# speedref_vel_vx=np.zeros_like(speedref_vel_vv)
# speedref_vel_vy=np.zeros_like(speedref_vel_vv)
# pts=np.where((speedref_vel_vv!=0.0)&(speedref_vel_vv!=-99.0)) # indices of points to reproject vx and vy
# for i,j in zip(*pts): # iterate over these points
# PSvx=speedref_vel_PS_vx_in_utm[i,j]
# PSvy=speedref_vel_PS_vy_in_utm[i,j]
# UTMx,UTMy=chip_center_grid_xy[:,i,j]
# (PSx,PSy,PSz) = transform_utm_to_PS.TransformPoint( UTMx, UTMy, 0.0 )
# (UTMx_vec_end_point, UTMy_vec_end_point, UTMz_vec_end_point) = inv_transform_PS_to_utm.TransformPoint( PSx + PSvx, PSy + PSvy, PSz)
# speedref_vel_vx[i,j] = UTMx_vec_end_point - UTMx
# speedref_vel_vy[i,j] = UTMy_vec_end_point - UTMy
#
# os.remove(args.VRT_dir + '/' + output_vx_vrt_name) # get rid of the temporary vrt file now
# os.remove(args.VRT_dir + '/' + output_vy_vrt_name) # get rid of the temporary vrt file now
# os.remove(args.VRT_dir + '/' + output_vv_vrt_name) # get rid of the temporary vrt file now
# ##########################
# # commented these out for debugging driver issue - uncomment for production to free memory
# ##########################
# # inmem_speed_ref_vv_utm=None
# # inmem_speed_ref_vx_utm=None
# # inmem_speed_ref_vy_utm=None
#
# print('got speed_ref_vx and vy data and reprojected to original image local utm')
# if not(args.nlf):
# log_output_lines.append('# got speed_ref_vx and vy data and reprojected to original image local utm')
#
# if psutil_available:
# print('got speed_ref data - psutil reports process %s using '%(args.out_name_base),memory_usage_psutil())
# if resource_available:
# print('got speed_ref data - resource module reports process %s using '%(args.out_name_base),memory_usage_resource())
if args.offset_correction_lgo_mask or args.use_itslive_land_mask_from_web:
# use land(1)-glacier(0)-ocean(2) mask to find land pixels for offset correction - means reprojecting mask into Landsat image's local UTM
######################### Following code started from GRN_find_offset_reproject_to_PS_olg_mask_v3_fitWCS_ifnoland_multiprocess.py
# now make a .vrt of the area of the land/ocean/glacier mask image (so you only
# have to read in a small part of the mask)
# original mask is in it's own projection
# find local UTM corners of input, transform to mask's projection, set up .vrt of required area
# from mask, read it in, warp in memory to local UTM, apply land mask to find