Commit 926e7c54 authored by Bruno Duyé

Add --limit_nb_dataset_files option

parent 745696b4
@@ -32,8 +32,9 @@ target_dir: path of target directory to write datasets & series in DBnomics form
 => all files will be deleted
 Options:
-  --debug    show debug output, and compute some tests that makes process slower
-  --only-dataset <dataset_code>    process only given dataset acronym
+  --debug                                  show debug output, and compute some tests that makes process slower
+  --only-dataset <dataset_code>            process only given dataset acronym
+  -l --limit_nb_dataset_files <number>     limit the number of files to convert per dataset
 """
 import logging
@@ -77,6 +78,7 @@ def main():
     global debug_mode
     global source_dir
     global target_dir
+    global args
     args = docopt(__doc__.format(self_filename=os.path.basename(__file__)))
     source_dir = args['<source_dir>']
     assert os.path.exists(source_dir)
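For readers unfamiliar with Python scoping, the new `global args` line is what makes the parsed options visible outside `main()`: an assignment inside a function binds a local name unless the name is declared `global`. A minimal, hypothetical sketch of the pattern (all names invented for illustration):

```python
# Hypothetical sketch: why `global args` is needed for module-wide access.
args = None  # module-level name, readable from any function in the module


def main():
    global args  # without this, `args = ...` would create a local variable
    args = {'--limit_nb_dataset_files': '2'}  # stands in for docopt(__doc__)


def convert_datastet():
    # Reads the module-level `args` bound by main().
    print(args['--limit_nb_dataset_files'])


main()
convert_datastet()  # prints: 2
```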
@@ -181,6 +183,7 @@ def convert_datastet(dataset_source_dict, dataset_acronym, dataset_target_path,
         'country': {}
     }
     dataset_series_jsons_data = []
+    nb_converted_files = 0
     # Parse series for this indicator (each entry in series.json)
     for series_group_source_json in dataset_series_source_json:
         indicator_code = series_group_source_json['id']  # Example: 'AG.LND.AGRI.K2'
@@ -198,6 +201,9 @@ def convert_datastet(dataset_source_dict, dataset_acronym, dataset_target_path,
         dimensions_values_labels_by_codes['indicator'][indicator_code] = indicator_label
         # Add series json data to dataset's ones
         dataset_series_jsons_data.extend(series_json_data)
+        nb_converted_files += 1
+        if args['--limit_nb_dataset_files'] and nb_converted_files == int(args['--limit_nb_dataset_files']):
+            break
     # Return dataset.json content
     return {
         'code': dataset_code,
...
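For context, here is a minimal, self-contained sketch of the docopt pattern this commit introduces. The script skeleton, the `convert()` stub, and the directory walk are hypothetical stand-ins; only the `--limit_nb_dataset_files` option and the early-exit check mirror the commit.

```python
"""Hypothetical mini-converter illustrating the --limit_nb_dataset_files option.

Usage:
  {self_filename} <source_dir> [--limit_nb_dataset_files=<number>]

Options:
  -l --limit_nb_dataset_files <number>   limit the number of files to convert per dataset
"""
import os

from docopt import docopt


def convert(filepath):
    # Stand-in for the real per-file conversion work.
    print("converting", filepath)


def main():
    args = docopt(__doc__.format(self_filename=os.path.basename(__file__)))
    limit = args['--limit_nb_dataset_files']  # None when the option is absent
    nb_converted_files = 0
    for filename in sorted(os.listdir(args['<source_dir>'])):
        convert(os.path.join(args['<source_dir>'], filename))
        nb_converted_files += 1
        # Same early-exit logic as the commit: stop once the limit is reached.
        if limit and nb_converted_files == int(limit):
            break


if __name__ == '__main__':
    main()
```

A minor design note: the commit compares with `==`, which works because the counter increases by exactly one per iteration; `>=` would be the slightly more defensive choice if the loop ever advanced the counter by more than one.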