Skip to content

Commit

Permalink
Merge pull request #42 from FRBs/varying_F
Browse files Browse the repository at this point in the history
PR for F analysis
  • Loading branch information
profxj authored Jul 27, 2023
2 parents 1c9f642 + 5c42aa3 commit dc016b2
Show file tree
Hide file tree
Showing 96 changed files with 15,123 additions and 3,467 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
python: [3.8, 3.9]
python: ['3.9','3.10']
toxenv: [test, test-alldeps, test-astropydev]
steps:
- name: Check out repository
Expand Down
2 changes: 0 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -170,5 +170,3 @@ zdm/untitled10.py

*.DS_Store
papers/H0_I/Analysis/Real/minicube_real.src


80 changes: 80 additions & 0 deletions papers/F/Analysis/CRACO/Cloud/OutputMini/nautilus_craco_mini.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
---
# 25 processors on mini for Varying F
# Debug shell: kubectl exec -it test-pod -- /bin/bash
apiVersion: batch/v1
kind: Job
metadata:
  name: xavier-zdm-craco-full-3rd-10
spec:
  backoffLimit: 0  # do not retry the pod on failure
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # Keep off a known-problematic node
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - k8s-chase-ci-01.noc.ucsb.edu
                  - key: nvidia.com/gpu.product
                    operator: In
                    values:
                      - NVIDIA-GeForce-GTX-1080-Ti
      containers:
        - name: container
          image: localhost:30081/profxj/zdm_docker:latest  # UPDATE
          imagePullPolicy: Always
          resources:
            requests:
              cpu: "25"
              memory: "8Gi"
              ephemeral-storage: "50Gi"
            limits:
              cpu: "27"
              memory: "12Gi"
              ephemeral-storage: "100Gi"
              # nvidia.com/gpu: "1"  # See docs to exclude certain types
          command: ["/bin/bash", "-c"]
          args:
            # Folded block scalar (>-): the lines below join into a single
            # shell command string passed to bash -c.
            - >-
              cd FRB/FRB;
              git fetch;
              git pull;
              python setup.py develop;
              cd ../ne2001;
              python setup.py develop;
              cd ../zdm;
              git fetch;
              git checkout varying_F;
              python setup.py develop;
              cd papers/F/Analysis/CRACO/Cloud;
              python run_craco_full.py -n 25 -t 25 -b 1;
              aws --endpoint http://rook-ceph-rgw-nautiluss3.rook s3 cp Output s3://zdm/Cubes/F/mini/ --recursive --force;
          # NOTE(review): `aws s3 cp` does not document a --force option — confirm the
          # installed CLI accepts it, otherwise the final upload step errors out.
          # NOTE(review): unlike the full-cube job, there is no `mkdir Output;` before
          # the run — presumably run_craco_full.py creates it; verify.
          env:
            - name: "ENDPOINT_URL"
              value: "http://rook-ceph-rgw-nautiluss3.rook"
            - name: "S3_ENDPOINT"
              value: "rook-ceph-rgw-nautiluss3.rook"
          volumeMounts:
            - name: prp-s3-credentials
              mountPath: "/root/.aws/credentials"
              subPath: "credentials"
            - name: ephemeral
              mountPath: "/tmp"
            - name: "dshm"
              mountPath: "/dev/shm"
      nodeSelector:
        nautilus.io/disktype: nvme
      restartPolicy: Never
      volumes:
        # Secrets file for nautilus s3 credentials .aws/credentials and .s3cfg
        - name: prp-s3-credentials
          secret:
            secretName: prp-s3-credentials
        # Shared memory (necessary for Python's multiprocessing.shared_memory module to work)
        - name: dshm
          emptyDir:
            medium: Memory
        # Ephemeral storage
        - name: ephemeral
          emptyDir: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
---
# 25 processors on mini for Varying F
# Debug shell: kubectl exec -it test-pod -- /bin/bash
apiVersion: batch/v1
kind: Job
metadata:
  # NOTE(review): same Job name as the OutputMini manifest — two Jobs with the
  # same name cannot coexist in one namespace; confirm this is intentional.
  name: xavier-zdm-craco-full-3rd-10
spec:
  backoffLimit: 0  # do not retry the pod on failure
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # Keep off a known-problematic node
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - k8s-chase-ci-01.noc.ucsb.edu
                  - key: nvidia.com/gpu.product
                    operator: In
                    values:
                      - NVIDIA-GeForce-GTX-1080-Ti
      containers:
        - name: container
          image: localhost:30081/profxj/zdm_docker:latest  # UPDATE
          imagePullPolicy: Always
          resources:
            requests:
              cpu: "25"
              memory: "8Gi"
              ephemeral-storage: "50Gi"
            limits:
              cpu: "27"
              memory: "12Gi"
              ephemeral-storage: "100Gi"
              # nvidia.com/gpu: "1"  # See docs to exclude certain types
          command: ["/bin/bash", "-c"]
          args:
            # Folded block scalar (>-): the lines below join into a single
            # shell command string passed to bash -c.
            - >-
              cd FRB/FRB;
              git fetch;
              git pull;
              python setup.py develop;
              cd ../ne2001;
              python setup.py develop;
              cd ../zdm;
              git fetch;
              git checkout varying_F;
              python setup.py develop;
              cd papers/F/Analysis/CRACO/Cloud;
              python run_craco_full.py -n 25 -t 25 -b 1;
              aws --endpoint http://rook-ceph-rgw-nautiluss3.rook s3 cp Output s3://zdm/Cubes/F/mini/ --recursive --force;
          # NOTE(review): `aws s3 cp` does not document a --force option — confirm the
          # installed CLI accepts it, otherwise the final upload step errors out.
          env:
            - name: "ENDPOINT_URL"
              value: "http://rook-ceph-rgw-nautiluss3.rook"
            - name: "S3_ENDPOINT"
              value: "rook-ceph-rgw-nautiluss3.rook"
          volumeMounts:
            - name: prp-s3-credentials
              mountPath: "/root/.aws/credentials"
              subPath: "credentials"
            - name: ephemeral
              mountPath: "/tmp"
            - name: "dshm"
              mountPath: "/dev/shm"
      nodeSelector:
        nautilus.io/disktype: nvme
      restartPolicy: Never
      volumes:
        # Secrets file for nautilus s3 credentials .aws/credentials and .s3cfg
        - name: prp-s3-credentials
          secret:
            secretName: prp-s3-credentials
        # Shared memory (necessary for Python's multiprocessing.shared_memory module to work)
        - name: dshm
          emptyDir:
            medium: Memory
        # Ephemeral storage
        - name: ephemeral
          emptyDir: {}
81 changes: 81 additions & 0 deletions papers/F/Analysis/CRACO/Cloud/nautilus_craco_full_logF.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
---
# 21 processors on full for Varying F
# Debug shell: kubectl exec -it test-pod -- /bin/bash
apiVersion: batch/v1
kind: Job
metadata:
  name: x-zdm-craco-full-logf
spec:
  backoffLimit: 0  # do not retry the pod on failure
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # Keep off a known-problematic node
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - k8s-chase-ci-01.noc.ucsb.edu
                  - key: nvidia.com/gpu.product
                    operator: In
                    values:
                      - NVIDIA-GeForce-GTX-1080-Ti
      containers:
        - name: container
          image: localhost:30081/profxj/zdm_docker:latest  # UPDATE
          imagePullPolicy: Always
          resources:
            requests:
              cpu: "21"
              memory: "8Gi"
              ephemeral-storage: "50Gi"
            limits:
              cpu: "23"
              memory: "12Gi"
              ephemeral-storage: "100Gi"
              # nvidia.com/gpu: "1"  # See docs to exclude certain types
          command: ["/bin/bash", "-c"]
          args:
            # Folded block scalar (>-): the lines below join into a single
            # shell command string passed to bash -c.
            - >-
              cd FRB;
              git fetch;
              git pull;
              python setup.py develop;
              cd ../ne2001;
              python setup.py develop;
              cd ../zdm;
              git fetch;
              git checkout varying_F;
              python setup.py develop;
              cd papers/F/Analysis/CRACO/Cloud;
              mkdir Output;
              python run_craco_full_logF.py -n 21 -t 21 -b 1;
              aws --endpoint http://rook-ceph-rgw-nautiluss3.rook s3 cp Output s3://zdm/Cubes/F/full/ --recursive --force;
          # NOTE(review): `aws s3 cp` does not document a --force option — confirm the
          # installed CLI accepts it, otherwise the final upload step errors out.
          env:
            - name: "ENDPOINT_URL"
              value: "http://rook-ceph-rgw-nautiluss3.rook"
            - name: "S3_ENDPOINT"
              value: "rook-ceph-rgw-nautiluss3.rook"
          volumeMounts:
            - name: prp-s3-credentials
              mountPath: "/root/.aws/credentials"
              subPath: "credentials"
            - name: ephemeral
              mountPath: "/tmp"
            - name: "dshm"
              mountPath: "/dev/shm"
      nodeSelector:
        nautilus.io/disktype: nvme
      restartPolicy: Never
      volumes:
        # Secrets file for nautilus s3 credentials .aws/credentials and .s3cfg
        - name: prp-s3-credentials
          secret:
            secretName: prp-s3-credentials
        # Shared memory (necessary for Python's multiprocessing.shared_memory module to work)
        - name: dshm
          emptyDir:
            medium: Memory
        # Ephemeral storage
        - name: ephemeral
          emptyDir: {}
80 changes: 80 additions & 0 deletions papers/F/Analysis/CRACO/Cloud/nautilus_craco_mini_logF.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
---
# 25 processors on mini for Varying F
# Debug shell: kubectl exec -it test-pod -- /bin/bash
apiVersion: batch/v1
kind: Job
metadata:
  name: jay-zdm-craco-mini-logf
spec:
  backoffLimit: 0  # do not retry the pod on failure
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # Keep off a known-problematic node
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - k8s-chase-ci-01.noc.ucsb.edu
                  - key: nvidia.com/gpu.product
                    operator: In
                    values:
                      - NVIDIA-GeForce-GTX-1080-Ti
      containers:
        - name: container
          image: localhost:30081/profxj/zdm_docker:latest  # UPDATE
          imagePullPolicy: Always
          resources:
            requests:
              cpu: "25"
              memory: "8Gi"
              ephemeral-storage: "50Gi"
            limits:
              cpu: "27"
              memory: "12Gi"
              ephemeral-storage: "100Gi"
              # nvidia.com/gpu: "1"  # See docs to exclude certain types
          command: ["/bin/bash", "-c"]
          args:
            # Folded block scalar (>-): the lines below join into a single
            # shell command string passed to bash -c.
            # NOTE(review): this job checks out branch `logF` while the sibling
            # jobs use `varying_F` — confirm that is intentional.
            - >-
              cd FRB;
              git fetch;
              git pull;
              python setup.py develop;
              cd ../ne2001;
              python setup.py develop;
              cd ../zdm;
              git fetch;
              git checkout logF;
              python setup.py develop;
              cd papers/F/Analysis/CRACO/Cloud;
              python run_craco_mini_logF.py -n 25 -t 25 -b 1;
              aws --endpoint http://rook-ceph-rgw-nautiluss3.rook s3 cp Output s3://zdm/Cubes/F/mini/ --recursive --force;
          # NOTE(review): `aws s3 cp` does not document a --force option — confirm the
          # installed CLI accepts it, otherwise the final upload step errors out.
          # NOTE(review): no `mkdir Output;` before the run, unlike the full job —
          # presumably run_craco_mini_logF.py creates it; verify.
          env:
            - name: "ENDPOINT_URL"
              value: "http://rook-ceph-rgw-nautiluss3.rook"
            - name: "S3_ENDPOINT"
              value: "rook-ceph-rgw-nautiluss3.rook"
          volumeMounts:
            - name: prp-s3-credentials
              mountPath: "/root/.aws/credentials"
              subPath: "credentials"
            - name: ephemeral
              mountPath: "/tmp"
            - name: "dshm"
              mountPath: "/dev/shm"
      nodeSelector:
        nautilus.io/disktype: nvme
      restartPolicy: Never
      volumes:
        # Secrets file for nautilus s3 credentials .aws/credentials and .s3cfg
        - name: prp-s3-credentials
          secret:
            secretName: prp-s3-credentials
        # Shared memory (necessary for Python's multiprocessing.shared_memory module to work)
        - name: dshm
          emptyDir:
            medium: Memory
        # Ephemeral storage
        - name: ephemeral
          emptyDir: {}
Loading

0 comments on commit dc016b2

Please sign in to comment.