from dataflows import (
    Flow, load, unpivot, find_replace, set_type, dump_to_path,
    update_package, update_resource, join, join_with_self,
    add_computed_field, delete_fields, checkpoint, duplicate,
)

BASE_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'
CONFIRMED = 'time_series_19-covid-Confirmed.csv'
DEATH = 'time_series_19-covid-Deaths.csv'
RECOVERED = 'time_series_19-covid-Recovered.csv'

def to_normal_date(row):
    # Rewrite 'M-D-YY' (after the '/' -> '-' replacement below) as
    # zero-padded 'DD-MM-YY'.
    month, day, year = row['Date'].split('-')
    day = f'0{day}' if len(day) == 1 else day
    month = f'0{month}' if len(month) == 1 else month
    row['Date'] = '-'.join([day, month, year])
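
# Example: {'Date': '1-22-20'} becomes {'Date': '22-01-20'}, matching the
# '%d-%m-%y' format declared via set_type() in the flow below.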

unpivoting_fields = [
    {'name': r'([0-9]+/[0-9]+/[0-9]+)', 'keys': {'Date': r'\1'}}
]
extra_keys = [{'name': 'Date', 'type': 'string'}]
extra_value = {'name': 'Case', 'type': 'number'}
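
# Each source table is wide: one row per Province/State with one column per
# date. unpivot() melts every column whose header matches the date pattern
# above into a ('Date', 'Case') pair, giving one row per location per day.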

Flow(
    load(f'{BASE_URL}{CONFIRMED}'),
    load(f'{BASE_URL}{RECOVERED}'),
    load(f'{BASE_URL}{DEATH}'),
    checkpoint('load_data'),
    unpivot(unpivoting_fields, extra_keys, extra_value),
    find_replace([{'name': 'Date', 'patterns': [{'find': '/', 'replace': '-'}]}]),
    to_normal_date,
    set_type('Date', type='date', format='%d-%m-%y', resources=None),
    set_type('Case', type='number', resources=None),
    # Merge the confirmed and recovered counts into the deaths resource,
    # keyed by location and date; each source resource is deleted after
    # its join.
    join(
        source_name='time_series_19-covid-Confirmed',
        source_key=['Province/State', 'Country/Region', 'Date'],
        source_delete=True,
        target_name='time_series_19-covid-Deaths',
        target_key=['Province/State', 'Country/Region', 'Date'],
        fields=dict(Confirmed={
            'name': 'Case',
            'aggregate': 'first'
        })
    ),
    join(
        source_name='time_series_19-covid-Recovered',
        source_key=['Province/State', 'Country/Region', 'Date'],
        source_delete=True,
        target_name='time_series_19-covid-Deaths',
        target_key=['Province/State', 'Country/Region', 'Date'],
        fields=dict(Recovered={
            'name': 'Case',
            'aggregate': 'first'
        })
    ),
    # The remaining 'Case' column now holds the death counts: copy it into
    # a proper 'Deaths' field, then drop it.
    add_computed_field(
        target={'name': 'Deaths', 'type': 'number'},
        operation='format',
        with_='{Case}'
    ),
    delete_fields(['Case']),
    update_resource('time_series_19-covid-Deaths', name='time-series-19-covid-combined', path='data/time-series-19-covid-combined.csv'),
    checkpoint('processed_data'),
    # Duplicate the stream to create aggregated data
    duplicate(
        source='time-series-19-covid-combined',
        target_name='worldwide-aggregated',
        target_path='worldwide-aggregated.csv'
    ),
    # Collapse the per-location rows into one worldwide row per date.
    join_with_self(
        resource_name='worldwide-aggregated',
        join_key=['Date'],
        fields=dict(
            Date={'name': 'Date'},
            Confirmed={'name': 'Confirmed', 'aggregate': 'sum'},
            Recovered={'name': 'Recovered', 'aggregate': 'sum'},
            Deaths={'name': 'Deaths', 'aggregate': 'sum'}
        )
    ),
    update_package(
        name='covid-19',
        title='Novel Coronavirus 2019',
        views=[
            {
                "title": "Total world to date",
                "resources": ["worldwide-aggregated"],
                "specType": "simple",
                "spec": {
                    "group": "Date",
                    "series": ["Confirmed", "Recovered", "Deaths"],
                    "type": "line"
                }
            }
        ]
    ),
    dump_to_path()
).results()[0]
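
For reference, a minimal sketch of consuming the script's output, assuming it was run from the repository root (dump_to_path() with no arguments writes datapackage.json and the resource CSVs relative to the current directory) and that pandas is available; any CSV reader would do:

import pandas as pd

# Per-location daily series, at the path set by update_resource() above.
combined = pd.read_csv('data/time-series-19-covid-combined.csv', parse_dates=['Date'])

# One worldwide row per date, produced by the join_with_self() step.
worldwide = pd.read_csv('worldwide-aggregated.csv', parse_dates=['Date'])

# Most recent worldwide totals.
print(worldwide.sort_values('Date').tail())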