upgrade from pyDataverse 0.2.1 to 0.3.3 #44

Open
wants to merge 4 commits into master
29 changes: 15 additions & 14 deletions create_sample_data.py
@@ -1,4 +1,5 @@
-from pyDataverse.api import Api
+from pyDataverse.api import NativeApi
+from pyDataverse.models import Datafile
 import json
 import dvconfig
 import os
@@ -13,18 +14,16 @@
 except:
     print("Using API token from config file.")
 paths = dvconfig.sample_data
-api = Api(base_url, api_token)
-print(api.status)
+api = NativeApi(base_url, api_token)
+print(api.get_info_version())
 # TODO limit amount of recursion
-def check_dataset_lock(dataset_dbid):
-    query_str = '/datasets/' + str(dataset_dbid) + '/locks'
-    params = {}
-    resp = api.get_request(query_str, params=params, auth=True)
+def check_dataset_lock(dataset_pid):
+    resp = api.get_dataset_lock(dataset_pid)
     locks = resp.json()['data']
     if (locks):
-        print('Lock found for dataset id ' + str(dataset_dbid) + '... sleeping...')
+        print('Lock found for dataset id ' + str(dataset_pid) + '... sleeping...')
         time.sleep(2)
-        check_dataset_lock(dataset_dbid)
+        check_dataset_lock(dataset_pid)
 resp = api.get_dataverse(':root')
 buff = StringIO("")
 if (resp.status_code == 401):
@@ -48,9 +47,8 @@ def check_dataset_lock(dataset_dbid):
 with open(dv_json) as f:
     metadata = json.load(f)
 print(metadata)
-# FIXME: Why is "identifier" required?
 identifier = metadata['alias']
-resp = api.create_dataverse(identifier, json.dumps(metadata), parent=parent)
+resp = api.create_dataverse(parent, json.dumps(metadata))
 print(resp)
 resp = api.publish_dataverse(identifier)
 print(resp)
@@ -73,12 +71,15 @@ def check_dataset_lock(dataset_dbid):
 relpath = os.path.relpath(filepath,files_dir)
 # "directoryLabel" is used to populate "File Path"
 directoryLabel, filename = os.path.split(relpath)
-resp = api.upload_file(dataset_pid, "'" + filepath + "'")
+df = Datafile()
+df_filename = filepath
+df.set({"pid": dataset_pid, "filename": df_filename})
+resp = api.upload_datafile(dataset_pid, df_filename, df.json())
 print(resp)
-file_id = resp['data']['files'][0]['dataFile']['id']
+file_id = resp.json()['data']['files'][0]['dataFile']['id']
 ## This lock check and sleep is here to prevent the dataset from being permanently
 ## locked because a tabular file was uploaded first.
-check_dataset_lock(dataset_dbid)
+check_dataset_lock(dataset_pid)
 # TODO: Think more about where the description comes from. A "sidecar" file as proposed at https://github.com/IQSS/dataverse/issues/5924#issuecomment-499605672 ?
 # L.A.: I implemented something along these lines - an (optional) directory called ".filemetadata"
 # in the dataset directory, where files containing extra json filemetadata records may be
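Reviewer note: for anyone new to pyDataverse 0.3.x, here is a minimal, self-contained sketch of the call pattern this file now uses. The base URL, token, persistent identifier, and file path are hypothetical placeholders; the methods themselves are the ones introduced in this diff, and the responses are assumed to be requests.Response objects as elsewhere in the script.

from pyDataverse.api import NativeApi
from pyDataverse.models import Datafile

# Hypothetical connection details; the real script reads these from dvconfig.
api = NativeApi("https://demo.dataverse.org", "xxxx-xxxx-xxxx-xxxx")
print(api.get_info_version().json())  # replaces the old api.status attribute

# In 0.3.x an upload takes a Datafile model plus the file path,
# instead of the old upload_file(pid, path) helper.
dataset_pid = "doi:10.5072/FK2/EXAMPLE"  # hypothetical PID
df = Datafile()
df.set({"pid": dataset_pid, "filename": "data/file.csv"})
resp = api.upload_datafile(dataset_pid, "data/file.csv", df.json())
print(resp.json()["data"]["files"][0]["dataFile"]["id"])

# Tabular files are ingested asynchronously, which is why the script polls
# the lock endpoint before continuing.
print(api.get_dataset_lock(dataset_pid).json()["data"])

The recursive check_dataset_lock() above polls that same endpoint every two seconds until the ingest lock clears.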
9 changes: 3 additions & 6 deletions destroy_all_dvobjects.py
@@ -1,4 +1,4 @@
-from pyDataverse.api import Api
+from pyDataverse.api import NativeApi
 import json
 import dvconfig
 import requests
@@ -10,8 +10,7 @@
print("Using API token from $API_TOKEN.")
except:
print("Using API token from config file.")
api = Api(base_url, api_token)
print('API status: ' +api.status)
api = NativeApi(base_url, api_token)

dataverse_ids = []
dataset_ids = []
@@ -35,9 +34,7 @@ def main():
print("Done.")

def find_children(dataverse_database_id):
query_str = '/dataverses/' + str(dataverse_database_id) + '/contents'
params = {}
resp = api.get_request(query_str, params=params, auth=True)
resp = api.get_dataverse_contents(dataverse_database_id, auth=True)
for dvobject in resp.json()['data']:
dvtype = dvobject['type']
dvid = dvobject['id']
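A short sketch of the replacement call used above, with hypothetical connection details. It lists the children of the root dataverse the same way find_children() does, assuming get_dataverse_contents() accepts the ':root' alias as well as a database id, like the other dataverse endpoints in the script.

from pyDataverse.api import NativeApi

# Hypothetical base URL and token.
api = NativeApi("https://demo.dataverse.org", "xxxx-xxxx-xxxx-xxxx")

# get_dataverse_contents() replaces the hand-built
# GET /dataverses/{id}/contents request from the 0.2.1 code.
resp = api.get_dataverse_contents(":root", auth=True)
for dvobject in resp.json()["data"]:
    print(dvobject["type"], dvobject["id"])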
5 changes: 3 additions & 2 deletions requirements.txt
@@ -1,2 +1,3 @@
-pyDataverse==0.2.1
-CairoSVG==2.7.1
+pyDataverse==0.3.2
+CairoSVG==2.7.1
+requests==2.31.0
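After installing the pinned requirements, a quick sanity check that the interpreter picks up the new release (assuming the pyDataverse package exposes __version__):

import pyDataverse

# Expect the version pinned above.
print(pyDataverse.__version__)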