Commits (2)
@@ -86,8 +86,6 @@ def main():
     target_dir = Path(args['<target_dir>'])
     debug_mode = args['--debug']
     logging.basicConfig(format="%(levelname)s: %(message)s", level=(logging.DEBUG if debug_mode else logging.INFO))
-    # Does user asked for a nb series per dataset limit ?
-    # limit_nb_dataset_series = int(args['--limit_nb_dataset_series']) if args['--limit_nb_dataset_series'] else None
     # Write datapackage.json
     write_json_file(target_dir / 'datapackage.json', DATAPACKAGE_JSON)
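The lines above follow the docopt convention: the CLI is declared in the module docstring and docopt(__doc__) returns a dict keyed by <positional> and --flag names. A minimal sketch of the setup this hunk relies on (the usage string is illustrative, not copied from the script):

    """converter.py

    Usage:
        converter.py [--debug] <target_dir>
    """
    import logging
    from pathlib import Path

    from docopt import docopt  # third-party: pip install docopt

    def main():
        # docopt parses sys.argv against the usage string in __doc__.
        args = docopt(__doc__)
        target_dir = Path(args['<target_dir>'])
        debug_mode = args['--debug']
        logging.basicConfig(format="%(levelname)s: %(message)s",
                            level=logging.DEBUG if debug_mode else logging.INFO)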
@@ -162,7 +160,9 @@ def main():
     if ignored_datasets > 0:
         log.info("{} ignored dataset(s) due to '--only' or '--from' options".format(ignored_datasets))
     if datasets_in_error:
-        log.error("{}/{} dataset(s) ignored due to errors: {}".format(len(datasets_in_error), total_nb_datasets, ', '.join(datasets_in_error)))
+        ignored_datasets_str = ': ' + ', '.join(datasets_in_error) if len(datasets_in_error) < 100 else ''
+        log.error("{}/{} dataset(s) ignored due to errors{}".format(len(datasets_in_error), total_nb_datasets,
+                                                                    ignored_datasets_str))
 def iterate_dataset(filepath):
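The hunk above caps the logged list at 100 dataset codes so the error line stays readable on large providers. A standalone sketch of the same pattern (names mirror the hunk). Note the conditional expression binds looser than +, so the whole ': ' + ', '.join(...) is the "true" branch:

    import logging

    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
    log = logging.getLogger(__name__)

    datasets_in_error = ['ds1', 'ds2', 'ds3']
    total_nb_datasets = 10

    # Spell out the codes only when there are fewer than 100 of them.
    ignored_datasets_str = ': ' + ', '.join(datasets_in_error) if len(datasets_in_error) < 100 else ''
    log.error("{}/{} dataset(s) ignored due to errors{}".format(
        len(datasets_in_error), total_nb_datasets, ignored_datasets_str))
    # -> ERROR: 3/10 dataset(s) ignored due to errors: ds1, ds2, ds3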
@@ -267,7 +267,7 @@ def iterate_dataset(filepath):
     # Make periods format match DBnomics periods format
     format_periods(dataframe, time_dimension)
     # Do the hard job: cut the dataframe into series
-    dataframe = dataframe.set_index(time_dimension.label)
+    dataframe = dataframe.set_index(time_dimension.did)
     group_by = dataframe.groupby(list(real_dimensions_codes))
     # Iterate through those sub-dataframes and return series data
     for dimensions_values_codes, sub_df in group_by:
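The set_index/groupby pair is what cuts a dataset's flat observation table into one sub-dataframe per series: index on the time dimension, then group on every remaining dimension code. A self-contained illustration with made-up dimension names (FREQ, GEO) and a 'period' column standing in for time_dimension.did:

    import pandas as pd

    # Toy observation table: two annual series, one for FR and one for DE.
    dataframe = pd.DataFrame({
        'period': ['2010', '2011', '2010', '2011'],
        'FREQ':   ['A', 'A', 'A', 'A'],
        'GEO':    ['FR', 'FR', 'DE', 'DE'],
        'value':  [1.0, 2.0, 3.0, 4.0],
    })

    # Each group holds the full set of observations of exactly one series.
    dataframe = dataframe.set_index('period')
    for dimensions_values_codes, sub_df in dataframe.groupby(['FREQ', 'GEO']):
        print(dimensions_values_codes, sub_df['value'].tolist())
    # ('A', 'DE') [3.0, 4.0]
    # ('A', 'FR') [1.0, 2.0]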
@@ -321,28 +321,6 @@ def remove_href_keys(item):
         remove_href_keys(item['children'])
-
-
-def str_to_float(string):
-    """Try to convert given string to float and return it.
-    Return ValueError if conversion failed
-    """
-    try:
-        float_ = float(string)
-    except ValueError:
-        return ValueError
-    return float_
-
-
-def str_to_int(string):
-    """Try to convert given string to int and return it.
-    Return ValueError if conversion failed
-    """
-    try:
-        int_ = int(string)
-    except ValueError:
-        return ValueError
-    return int_
 
 
 def write_json_file(file_path, data):
     with file_path.open('w', encoding='utf-8') as file_:
         json.dump(data, file_, ensure_ascii=False, indent=2, sort_keys=True)
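The deleted str_to_float/str_to_int helpers returned the ValueError class itself on failure, which is truthy and easy to mistake for a valid number downstream. If a tolerant parse is still needed somewhere, the usual idiom returns None instead; a sketch, assuming that contract (the diff itself only shows the deletion):

    def str_to_float(string):
        """Convert `string` to float; return None if conversion fails."""
        try:
            return float(string)
        except ValueError:
            return None  # explicit sentinel instead of the ValueError class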