# coding: utf-8
"""Integration tests for the tshistory time-series storage handler (tsio).

The ``engine`` and ``tsh`` arguments of every test are pytest fixtures
providing, respectively, a SQLAlchemy engine and a TimeSerie handler.
"""
import calendar
from datetime import datetime, timedelta
from functools import partial
from pathlib import Path
from time import time

import numpy as np
import pandas as pd
import pytest
import pytz
from dateutil import parser

from tshistory.snapshot import Snapshot
from tshistory.testutil import (
    assert_df,
    assert_group_equals,
    genserie,
    tempattr
)

# directory holding the csv fixtures used by the tests
DATADIR = Path(__file__).parent / 'data'
def utcdt(*dt):
    """Build a UTC-aware pandas Timestamp from datetime components.

    ``utcdt(2020, 1, 1)`` is equivalent to
    ``pd.Timestamp('2020-01-01', tz='UTC')``.
    """
    naive = datetime(*dt)
    return pd.Timestamp(naive).tz_localize('UTC')
def test_changeset(engine, tsh):
    """Exercise grouped inserts through `newchangeset` and the log/info API."""
    index = pd.date_range(start=datetime(2017, 1, 1), freq='D', periods=3)
    data = [1., 2., 3.]

    with engine.connect() as cn:
        with tsh.newchangeset(cn, 'babar', _insertion_date=utcdt(2020, 1, 1)):
            tsh.insert(cn, pd.Series(data, index=index), 'ts_values', author='WONTBEUSED')
            tsh.insert(cn, pd.Series(['a', 'b', 'c'], index=index), 'ts_othervalues')

        # bogus author won't show up
        assert tsh.log(engine)[0]['author'] == 'babar'

        g = tsh.get_group(engine, 'ts_values')
        g2 = tsh.get_group(engine, 'ts_othervalues')
        assert_group_equals(g, g2)

        # inserting without an author outside a changeset is forbidden
        with pytest.raises(AssertionError):
            tsh.insert(engine, pd.Series([2, 3, 4], index=index), 'ts_values')

        with engine.connect() as cn:
            data.append(data.pop(0))
            with tsh.newchangeset(cn, 'celeste', _insertion_date=utcdt(2020, 1, 1)):
                tsh.insert(cn, pd.Series(data, index=index), 'ts_values')
                # below should be a noop
                tsh.insert(cn, pd.Series(['a', 'b', 'c'], index=index), 'ts_othervalues')

    g = tsh.get_group(engine, 'ts_values')
    assert ['ts_values'] == list(g.keys())

    assert_df("""
2017-01-01    2.0
2017-01-02    3.0
2017-01-03    1.0
""", tsh.get(engine, 'ts_values'))

    assert_df("""
2017-01-01    a
2017-01-02    b
2017-01-03    c
""", tsh.get(engine, 'ts_othervalues'))

    log = tsh.log(engine, names=['ts_values', 'ts_othervalues'])
    # second changeset only mentions ts_values: ts_othervalues was a noop
    assert [
        {'author': 'babar',
         'rev': 1,
         'date': pd.Timestamp('2020-01-01 00:00:00+0000', tz='UTC'),
         'meta': {},
         'names': ['ts_values', 'ts_othervalues']},
        {'author': 'celeste',
         'rev': 2,
         'meta': {},
         'date': pd.Timestamp('2020-01-01 00:00:00+0000', tz='UTC'),
         'names': ['ts_values']}
    ] == log

    log = tsh.log(engine, names=['ts_othervalues'])
    assert len(log) == 1
    assert log[0]['rev'] == 1
    assert log[0]['names'] == ['ts_values', 'ts_othervalues']

    log = tsh.log(engine, fromrev=2)
    assert len(log) == 1

    log = tsh.log(engine, torev=1)
    assert len(log) == 1

    info = tsh.info(engine)
    assert {
        'changeset count': 2,
        'serie names': ['ts_othervalues', 'ts_values'],
        'series count': 2
    } == info
def test_strip(engine, tsh):
    """Check `strip`: dropping all revisions of a series from a changeset on."""
    for i in range(1, 5):
        pubdate = utcdt(2017, 1, i)
        ts = genserie(datetime(2017, 1, 10), 'H', 1 + i)
        with tsh.newchangeset(engine, 'babar', _insertion_date=pubdate):
            tsh.insert(engine, ts, 'xserie')
        # also insert something completely unrelated
        tsh.insert(engine, genserie(datetime(2018, 1, 1), 'D', 1 + i), 'yserie', 'celeste')

    # changeset_at modes: exact match, latest before, first after
    csida = tsh.changeset_at(engine, 'xserie', datetime(2017, 1, 3))
    assert csida is not None
    csidb = tsh.changeset_at(engine, 'xserie', datetime(2017, 1, 3, 1), mode='before')
    csidc = tsh.changeset_at(engine, 'xserie', datetime(2017, 1, 3, 1), mode='after')
    assert csidb < csida < csidc

    log = tsh.log(engine, names=['xserie', 'yserie'])
    assert [(idx, l['author']) for idx, l in enumerate(log, start=1)
    ] == [
        (1, 'babar'),
        (2, 'celeste'),
        (3, 'babar'),
        (4, 'celeste'),
        (5, 'babar'),
        (6, 'celeste'),
        (7, 'babar'),
        (8, 'celeste')
    ]

    h = tsh.get_history(engine, 'xserie')
    assert_df("""
insertion_date             value_date         
2017-01-01 00:00:00+00:00  2017-01-10 00:00:00    0.0
                           2017-01-10 01:00:00    1.0
2017-01-02 00:00:00+00:00  2017-01-10 00:00:00    0.0
                           2017-01-10 01:00:00    1.0
                           2017-01-10 02:00:00    2.0
2017-01-03 00:00:00+00:00  2017-01-10 00:00:00    0.0
                           2017-01-10 01:00:00    1.0
                           2017-01-10 02:00:00    2.0
                           2017-01-10 03:00:00    3.0
2017-01-04 00:00:00+00:00  2017-01-10 00:00:00    0.0
                           2017-01-10 01:00:00    1.0
                           2017-01-10 02:00:00    2.0
                           2017-01-10 03:00:00    3.0
                           2017-01-10 04:00:00    4.0
""", h)

    # strip from the 2017-1-3 changeset: revisions 3 and 4 must vanish
    csid = tsh.changeset_at(engine, 'xserie', datetime(2017, 1, 3))
    with engine.connect() as cn:
        tsh.strip(cn, 'xserie', csid)

    assert_df("""
insertion_date             value_date         
2017-01-01 00:00:00+00:00  2017-01-10 00:00:00    0.0
                           2017-01-10 01:00:00    1.0
2017-01-02 00:00:00+00:00  2017-01-10 00:00:00    0.0
                           2017-01-10 01:00:00    1.0
                           2017-01-10 02:00:00    2.0
""", tsh.get_history(engine, 'xserie'))

    assert_df("""
2017-01-10 00:00:00    0.0
2017-01-10 01:00:00    1.0
2017-01-10 02:00:00    2.0
""", tsh.get(engine, 'xserie'))

    # internal structure is ok
    with engine.connect() as cn:
        cn.execute('set search_path to "{}.timeserie"'.format(tsh.namespace))
        df = pd.read_sql("select id, diff from xserie order by id", cn)
        # only keep whether a diff blob is present
        df['diff'] = df['diff'].apply(lambda x: False if x is None else True)

    assert_df("""
id   diff
0   1  False
1   2   True
""", df)

    log = tsh.log(engine, names=['xserie', 'yserie'])
    # 5 and 7 have disappeared
    assert [l['author'] for l in log
    ] == ['babar', 'celeste', 'babar', 'celeste', 'celeste', 'celeste']

    # stripped changesets keep a metadata trace (message ends with a csid,
    # normalized here to 'X' for comparison)
    log = tsh.log(engine, stripped=True, names=['xserie', 'yserie'])
    assert [list(l['meta'].values())[0][:-1] + 'X' for l in log if l['meta']
    ] == [
        'got stripped from X',
        'got stripped from X'
    ]
def test_tstamp_roundtrip(engine, tsh):
    """A tz-aware series is stored normalized to UTC and read back tz-aware."""
    ts = genserie(datetime(2017, 10, 28, 23),
                  'H', 4, tz='UTC')
    # convert to a DST-crossing local timezone before inserting
    ts.index = ts.index.tz_convert('Europe/Paris')

    assert_df("""
2017-10-29 01:00:00+02:00    0
2017-10-29 02:00:00+02:00    1
2017-10-29 02:00:00+01:00    2
2017-10-29 03:00:00+01:00    3
Freq: H
    """, ts)

    tsh.insert(engine, ts, 'tztest', 'Babar')
    back = tsh.get(engine, 'tztest')

    # though un localized we understand it's been normalized to utc
    assert_df("""
2017-10-28 23:00:00+00:00    0.0
2017-10-29 00:00:00+00:00    1.0
2017-10-29 01:00:00+00:00    2.0
2017-10-29 02:00:00+00:00    3.0
""", back)

    # same instants, even though displayed in different timezones
    assert (ts.index == back.index).all()
    assert str(back.index.dtype) == 'datetime64[ns, UTC]'
def test_multi_index_aware(engine, tsh):
    """Round-trip a tz-aware multi-index series and check index compatibility errors."""
    ts_multi_aware = genserie(
        start=pd.Timestamp(
            2017, 10, 28, 23
        ).tz_localize('UTC').tz_convert('Europe/Paris'),
        freq=['15T', '30T', '60T'],
        repeat=10,
        tz='Europe/Paris',
        name='ts_multi_aware',
    )
    ts_multi_aware.index.rename(['a', 'b', 'c'], inplace=True)

    assert_df("""
a                          b                          c                        
2017-10-29 01:00:00+02:00  2017-10-29 01:00:00+02:00  2017-10-29 01:00:00+02:00    0
2017-10-29 01:15:00+02:00  2017-10-29 01:30:00+02:00  2017-10-29 02:00:00+02:00    1
2017-10-29 01:30:00+02:00  2017-10-29 02:00:00+02:00  2017-10-29 02:00:00+01:00    2
2017-10-29 01:45:00+02:00  2017-10-29 02:30:00+02:00  2017-10-29 03:00:00+01:00    3
2017-10-29 02:00:00+02:00  2017-10-29 02:00:00+01:00  2017-10-29 04:00:00+01:00    4
2017-10-29 02:15:00+02:00  2017-10-29 02:30:00+01:00  2017-10-29 05:00:00+01:00    5
2017-10-29 02:30:00+02:00  2017-10-29 03:00:00+01:00  2017-10-29 06:00:00+01:00    6
2017-10-29 02:45:00+02:00  2017-10-29 03:30:00+01:00  2017-10-29 07:00:00+01:00    7
2017-10-29 02:00:00+01:00  2017-10-29 04:00:00+01:00  2017-10-29 08:00:00+01:00    8
2017-10-29 02:15:00+01:00  2017-10-29 04:30:00+01:00  2017-10-29 09:00:00+01:00    9
""", ts_multi_aware)

    tsh.insert(engine, ts_multi_aware, 'ts_multi_aware', 'test')
    ts_aware = tsh.get(engine, 'ts_multi_aware')

    # all index levels come back normalized to UTC
    assert_df("""
ts_multi_aware
a                         b                         c                                        
2017-10-28 23:00:00+00:00 2017-10-28 23:00:00+00:00 2017-10-28 23:00:00+00:00             0.0
2017-10-28 23:15:00+00:00 2017-10-28 23:30:00+00:00 2017-10-29 00:00:00+00:00             1.0
2017-10-28 23:30:00+00:00 2017-10-29 00:00:00+00:00 2017-10-29 01:00:00+00:00             2.0
2017-10-28 23:45:00+00:00 2017-10-29 00:30:00+00:00 2017-10-29 02:00:00+00:00             3.0
2017-10-29 00:00:00+00:00 2017-10-29 01:00:00+00:00 2017-10-29 03:00:00+00:00             4.0
2017-10-29 00:15:00+00:00 2017-10-29 01:30:00+00:00 2017-10-29 04:00:00+00:00             5.0
2017-10-29 00:30:00+00:00 2017-10-29 02:00:00+00:00 2017-10-29 05:00:00+00:00             6.0
2017-10-29 00:45:00+00:00 2017-10-29 02:30:00+00:00 2017-10-29 06:00:00+00:00             7.0
2017-10-29 01:00:00+00:00 2017-10-29 03:00:00+00:00 2017-10-29 07:00:00+00:00             8.0
2017-10-29 01:15:00+00:00 2017-10-29 03:30:00+00:00 2017-10-29 08:00:00+00:00             9.0
    """, pd.DataFrame(ts_aware.sort_index()))
    # Note: the columnns are returned according to the alphabetic order

    # value-date slicing works on the first index level
    ts = tsh.get(engine, 'ts_multi_aware',
                 from_value_date=pd.Timestamp(2017, 10, 29, 0).tz_localize('UTC'),
                 to_value_date=pd.Timestamp(2017, 10, 29, 1).tz_localize('UTC'))
    assert_df("""
a                          b                          c                        
2017-10-29 00:00:00+00:00  2017-10-29 01:00:00+00:00  2017-10-29 03:00:00+00:00    4.0
2017-10-29 00:15:00+00:00  2017-10-29 01:30:00+00:00  2017-10-29 04:00:00+00:00    5.0
2017-10-29 00:30:00+00:00  2017-10-29 02:00:00+00:00  2017-10-29 05:00:00+00:00    6.0
2017-10-29 00:45:00+00:00  2017-10-29 02:30:00+00:00  2017-10-29 06:00:00+00:00    7.0
2017-10-29 01:00:00+00:00  2017-10-29 03:00:00+00:00  2017-10-29 07:00:00+00:00    8.0
    """, ts)

    # a plain (single) index cannot be inserted over a multi-index series
    ts = genserie(datetime(2010, 1, 1), 'D', 10)
    with pytest.raises(Exception) as err:
        tsh.insert(engine, ts, 'ts_multi_aware', 'test')
    assert err.value.args[0] == 'Incompatible index types'

    # nor can a multi-index with fewer levels
    ts = genserie(
        start=pd.Timestamp(
            2017, 10, 28, 23
        ).tz_localize('UTC').tz_convert('Europe/Paris'),
        freq=['15T', '30T'],
        repeat=10,
        tz='Europe/Paris',
        name='ts_multi_aware',
    )
    ts.index.rename(['a', 'b'], inplace=True)

    with pytest.raises(Exception) as err:
        tsh.insert(engine, ts, 'ts_multi_aware', 'test')
    assert err.value.args[0] == "Incompatible multi indexes: ['a', 'b', 'c'] vs ['a', 'b']"
299

300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
def test_chunks(engine, tsh):
    """Check the snapshot chunking scheme: chunk count, parent links and sharing.

    With a bucket size of 2, successive inserts should create parent-linked
    chunk chains, re-using (sharing) unchanged chunks between commits.
    """
    with tempattr(Snapshot, '_bucket_size', 2):
        ts = genserie(datetime(2010, 1, 1), 'D', 5)
        tsh.insert(engine, ts, 'chunks', 'test')

        # we expect 3 chunks
        sql = 'select parent, chunk from "{}.snapshot".chunks order by id'.format(
            tsh.namespace
        )
        chunks = engine.execute(sql).fetchall()
        assert len(chunks) == 3
        assert chunks[0].parent is None
        assert chunks[1].parent == 1
        assert chunks[2].parent == 2
        ts0 = tsh._deserialize(chunks[0].chunk, 'name')
        ts1 = tsh._deserialize(chunks[1].chunk, 'name')
        ts2 = tsh._deserialize(chunks[2].chunk, 'name')

        assert_df("""
2010-01-01    0.0
2010-01-02    1.0
""", ts0)

        assert_df("""
2010-01-03    2.0
2010-01-04    3.0
""", ts1)

        assert_df("""
2010-01-05    4.0
""", ts2)

        assert_df("""
2010-01-01    0.0
2010-01-02    1.0
2010-01-03    2.0
2010-01-04    3.0
2010-01-05    4.0
""", tsh.get(engine, 'chunks'))

        # append-only edit: overlaps the last point and extends the series
        ts = pd.Series([4, 5, 6, 7, 8],
                       index=pd.date_range(start=datetime(2010, 1, 5),
                                           end=datetime(2010, 1, 9),
                                           freq='D'))

        tsh.insert(engine, ts, 'chunks', 'test')
        whole = tsh.get(engine, 'chunks')
        assert_df("""
2010-01-01    0.0
2010-01-02    1.0
2010-01-03    2.0
2010-01-04    3.0
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    6.0
2010-01-08    7.0
2010-01-09    8.0
""", whole)

        # we expect 6 chunks
        sql = 'select id, parent, chunk from "{}.snapshot".chunks order by id'.format(
            tsh.namespace
        )
        chunks = engine.execute(sql).fetchall()
        assert len(chunks) == 6
        assert chunks[4].parent == 4
        assert chunks[5].parent == 5
        assert {
            1: None,
            2: 1,
            3: 2, # head of first commit
            4: 2,
            5: 4,
            6: 5  # head of last commit
        } == {
            chunk.id: chunk.parent for chunk in chunks
        }

        ts3 = tsh._deserialize(chunks[3].chunk, 'name')
        ts4 = tsh._deserialize(chunks[4].chunk, 'name')
        ts5 = tsh._deserialize(chunks[5].chunk, 'name')

        assert_df("""
2010-01-05    4.0
2010-01-06    5.0
""", ts3)

        assert_df("""
2010-01-07    6.0
2010-01-08    7.0
""", ts4)

        assert_df("""
2010-01-09    8.0
""", ts5)

        # non-append edit
        whole[2] = 0
        whole[7] = 0

        tsh.insert(engine, whole, 'chunks', 'test')

        assert_df("""
2010-01-01    0.0
2010-01-02    1.0
2010-01-03    0.0
2010-01-04    3.0
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    6.0
2010-01-08    0.0
2010-01-09    8.0
""", tsh.get(engine, 'chunks'))

        assert_df("""
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    6.0
2010-01-08    0.0
2010-01-09    8.0
""", tsh.get(engine, 'chunks', from_value_date=datetime(2010, 1, 5)))

        # we expect 10 chunks
        # because we edit from the second chunk
        # and 4 new chunks have to be made
        sql = 'select id, parent, chunk from "{}.snapshot".chunks order by id'.format(
            tsh.namespace
        )
        chunks = engine.execute(sql).fetchall()
        assert len(chunks) == 10
        assert {
            1: None,
            2: 1,
            3: 2, # head of first commit
            4: 2,
            5: 4,
            6: 5, # head of second commit
            7: 1, # base of third commit (we lost many shared chunks)
            8: 7,
            9: 8,
            10: 9 # head of last commit
        } == {
            chunk.id: chunk.parent for chunk in chunks
        }

        # 2nd commit chunks without filtering
        snap = Snapshot(engine, tsh, 'chunks')
        chunks = {parent: len(ts) for parent, ts in snap.rawchunks(6)}
        assert chunks == {
            None: 2,
            1: 2,
            2: 2,
            4: 2,
            5: 1
        }
        # 2nd commit chunks with filtering
        chunks = {
            parent: len(ts)
            for parent, ts in snap.rawchunks(6, datetime(2010, 1, 5))
        }
        assert chunks == {2: 2, 4: 2, 5: 1}

        # 3rd commit chunks without filtering
        chunks = {parent: len(ts) for parent, ts in snap.rawchunks(10)}
        assert chunks == {
            None: 2,
            1: 2,
            7: 2,
            8: 2,
            9: 1
        }
        # 3rd commit chunks with filtering
        chunks = {
            parent: len(ts)
            for parent, ts in snap.rawchunks(10, datetime(2010, 1, 5))
        }
        assert chunks == {
            7: 2,
            8: 2,
            9: 1
        }

483
def test_differential(engine, tsh):
    """Successive inserts only store diffs; reads rebuild the merged series."""
    ts_begin = genserie(datetime(2010, 1, 1), 'D', 10)
    tsh.insert(engine, ts_begin, 'ts_test', 'test')

    assert tsh.exists(engine, 'ts_test')
    assert not tsh.exists(engine, 'this_does_not_exist')

    assert_df("""
2010-01-01    0.0
2010-01-02    1.0
2010-01-03    2.0
2010-01-04    3.0
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    6.0
2010-01-08    7.0
2010-01-09    8.0
2010-01-10    9.0
""", tsh.get(engine, 'ts_test'))

    # we should detect the emission of a message
    tsh.insert(engine, ts_begin, 'ts_test', 'babar')

    assert_df("""
2010-01-01    0.0
2010-01-02    1.0
2010-01-03    2.0
2010-01-04    3.0
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    6.0
2010-01-08    7.0
2010-01-09    8.0
2010-01-10    9.0
""", tsh.get(engine, 'ts_test'))

    ts_slight_variation = ts_begin.copy()
    ts_slight_variation.iloc[3] = 0
    ts_slight_variation.iloc[6] = 0
    tsh.insert(engine, ts_slight_variation, 'ts_test', 'celeste')

    assert_df("""
2010-01-01    0.0
2010-01-02    1.0
2010-01-03    2.0
2010-01-04    0.0
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    0.0
2010-01-08    7.0
2010-01-09    8.0
2010-01-10    9.0
""", tsh.get(engine, 'ts_test'))

    # a longer series overlapping the previous one
    ts_longer = genserie(datetime(2010, 1, 3), 'D', 15)
    ts_longer.iloc[1] = 2.48
    ts_longer.iloc[3] = 3.14
    ts_longer.iloc[5] = ts_begin.iloc[7]

    tsh.insert(engine, ts_longer, 'ts_test', 'test')

    assert_df("""
2010-01-01     0.00
2010-01-02     1.00
2010-01-03     0.00
2010-01-04     2.48
2010-01-05     2.00
2010-01-06     3.14
2010-01-07     4.00
2010-01-08     7.00
2010-01-09     6.00
2010-01-10     7.00
2010-01-11     8.00
2010-01-12     9.00
2010-01-13    10.00
2010-01-14    11.00
2010-01-15    12.00
2010-01-16    13.00
2010-01-17    14.00
""", tsh.get(engine, 'ts_test'))

    # start testing manual overrides
    ts_begin = genserie(datetime(2010, 1, 1), 'D', 5, initval=[2])
    ts_begin.loc['2010-01-04'] = -1
    tsh.insert(engine, ts_begin, 'ts_mixte', 'test')

    # -1 represents bogus upstream data
    assert_df("""
2010-01-01    2.0
2010-01-02    2.0
2010-01-03    2.0
2010-01-04   -1.0
2010-01-05    2.0
""", tsh.get(engine, 'ts_mixte'))

    # refresh all the period + 1 extra data point
    ts_more = genserie(datetime(2010, 1, 2), 'D', 5, [2])
    ts_more.loc['2010-01-04'] = -1
    tsh.insert(engine, ts_more, 'ts_mixte', 'test')

    assert_df("""
2010-01-01    2.0
2010-01-02    2.0
2010-01-03    2.0
2010-01-04   -1.0
2010-01-05    2.0
2010-01-06    2.0
""", tsh.get(engine, 'ts_mixte'))

    # just append an extra data point
    # with no intersection with the previous ts
    ts_one_more = genserie(datetime(2010, 1, 7), 'D', 1, [3])
    tsh.insert(engine, ts_one_more, 'ts_mixte', 'test')

    assert_df("""
2010-01-01    2.0
2010-01-02    2.0
2010-01-03    2.0
2010-01-04   -1.0
2010-01-05    2.0
2010-01-06    2.0
2010-01-07    3.0
""", tsh.get(engine, 'ts_mixte'))

    with engine.connect() as cn:
        cn.execute('set search_path to "{0}.timeserie", {0}, public'.format(tsh.namespace))
        allts = pd.read_sql("select name, table_name from registry "
                            "where name in ('ts_test', 'ts_mixte')",
                            cn)

        assert_df("""
name              table_name
0   ts_test   {0}.timeserie.ts_test
1  ts_mixte  {0}.timeserie.ts_mixte
""".format(tsh.namespace), allts)

        assert_df("""
2010-01-01    2.0
2010-01-02    2.0
2010-01-03    2.0
2010-01-04   -1.0
2010-01-05    2.0
2010-01-06    2.0
2010-01-07    3.0
""", tsh.get(cn, 'ts_mixte',
             revision_date=datetime.now()))
631
def test_bad_import(engine, tsh):
    """Degenerate inputs: csv-parsed dates, empty series, all-nan / mixed-nan."""
    # the data were parsed as date by pd.read_json()
    df_result = pd.read_csv(str(DATADIR / 'test_data.csv'))
    df_result['Gas Day'] = df_result['Gas Day'].apply(parser.parse, dayfirst=True, yearfirst=False)
    df_result.set_index('Gas Day', inplace=True)
    ts = df_result['SC']

    tsh.insert(engine, ts, 'SND_SC', 'test')
    result = tsh.get(engine, 'SND_SC')
    assert result.dtype == 'float64'

    # insertion of empty ts
    ts = pd.Series(name='truc', dtype='object')
    tsh.insert(engine, ts, 'empty_ts', 'test')
    assert tsh.get(engine, 'empty_ts') is None

    # nan in ts
    # all na
    ts = genserie(datetime(2010, 1, 10), 'D', 10, [np.nan], name='truc')
    tsh.insert(engine, ts, 'test_nan', 'test')
    assert tsh.get(engine, 'test_nan') is None

    # mixe na
    ts = pd.Series([np.nan] * 5 + [3] * 5,
                   index=pd.date_range(start=datetime(2010, 1, 10),
                                       freq='D', periods=10), name='truc')
    tsh.insert(engine, ts, 'test_nan', 'test')
    result = tsh.get(engine, 'test_nan')

    # re-inserting the same mixed series is harmless
    tsh.insert(engine, ts, 'test_nan', 'test')
    result = tsh.get(engine, 'test_nan')
    # only the non-nan tail survives
    assert_df("""
2010-01-15    3.0
2010-01-16    3.0
2010-01-17    3.0
2010-01-18    3.0
2010-01-19    3.0
""", result)

    # get_ts with name not in database
    tsh.get(engine, 'inexisting_name', 'test')
675
def test_revision_date(engine, tsh):
    """Reading a series as-of a past `revision_date` returns the old state."""
    # we prepare a good joke for the end of the test
    # ival = Snapshot._interval
    # Snapshot._interval = 3

    for i in range(1, 5):
        with engine.connect() as cn:
            with tsh.newchangeset(cn, 'test',
                                  _insertion_date=utcdt(2016, 1, i)):
                tsh.insert(cn, genserie(datetime(2017, 1, i), 'D', 3, [i]), 'revdate')

    # end of prologue, now some real meat
    idate0 = pd.Timestamp('2015-1-1 00:00:00', tz='UTC')
    with tsh.newchangeset(engine, 'test', _insertion_date=idate0):
        ts = genserie(datetime(2010, 1, 4), 'D', 4, [0], name='truc')
        tsh.insert(engine, ts, 'ts_through_time')
        assert idate0 == tsh.latest_insertion_date(engine, 'ts_through_time')

    idate1 = pd.Timestamp('2015-1-1 15:45:23', tz='UTC')
    with tsh.newchangeset(engine, 'test', _insertion_date=idate1):
        ts = genserie(datetime(2010, 1, 4), 'D', 4, [1], name='truc')
        tsh.insert(engine, ts, 'ts_through_time')
        assert idate1 == tsh.latest_insertion_date(engine, 'ts_through_time')

    idate2 = pd.Timestamp('2015-1-2 15:43:23', tz='UTC')
    with tsh.newchangeset(engine, 'test', _insertion_date=idate2):
        ts = genserie(datetime(2010, 1, 4), 'D', 4, [2], name='truc')
        tsh.insert(engine, ts, 'ts_through_time')
        assert idate2 == tsh.latest_insertion_date(engine, 'ts_through_time')

    idate3 = pd.Timestamp('2015-1-3', tz='UTC')
    with tsh.newchangeset(engine, 'test', _insertion_date=idate3):
        ts = genserie(datetime(2010, 1, 4), 'D', 4, [3], name='truc')
        tsh.insert(engine, ts, 'ts_through_time')
        assert idate3 == tsh.latest_insertion_date(engine, 'ts_through_time')

    # no revision_date: latest state
    ts = tsh.get(engine, 'ts_through_time')

    assert_df("""
2010-01-04    3.0
2010-01-05    3.0
2010-01-06    3.0
2010-01-07    3.0
""", ts)

    ts = tsh.get(engine, 'ts_through_time',
                 revision_date=datetime(2015, 1, 2, 18, 43, 23))

    assert_df("""
2010-01-04    2.0
2010-01-05    2.0
2010-01-06    2.0
2010-01-07    2.0
""", ts)

    ts = tsh.get(engine, 'ts_through_time',
                 revision_date=datetime(2015, 1, 1, 18, 43, 23))

    assert_df("""
2010-01-04    1.0
2010-01-05    1.0
2010-01-06    1.0
2010-01-07    1.0
""", ts)

    # before the first insertion: nothing
    ts = tsh.get(engine, 'ts_through_time',
                 revision_date=datetime(2014, 1, 1, 18, 43, 23))

    assert ts is None

    # epilogue: back to the revdate issue
    assert_df("""
2017-01-01    1.0
2017-01-02    2.0
2017-01-03    3.0
2017-01-04    4.0
2017-01-05    4.0
2017-01-06    4.0
""", tsh.get(engine, 'revdate'))

    oldstate = tsh.get(engine, 'revdate', revision_date=datetime(2016, 1, 2))
    assert_df("""
2017-01-01    1.0
2017-01-02    2.0
2017-01-03    2.0
2017-01-04    2.0
""", oldstate)

    # Snapshot._interval = ival
766
def _test_snapshots(engine, tsh):
    """(disabled: leading underscore) Check snapshot creation cadence and reuse."""
    baseinterval = Snapshot._interval
    Snapshot._interval = 4

    with engine.connect() as cn:
        for tscount in range(1, 11):
            ts = genserie(datetime(2015, 1, 1), 'D', tscount, [1])
            diff = tsh.insert(cn, ts, 'growing', 'babar')
            # each insert only adds the last point
            assert diff.index[0] == diff.index[-1] == ts.index[-1]

    # re-inserting the same series is a noop
    diff = tsh.insert(engine, ts, 'growing', 'babar')
    assert diff is None

    with engine.connect() as cn:
        cn.execute('set search_path to "{}.snapshot"'.format(tsh.namespace))
#         df = pd.read_sql("select cset from growing",
#                          cn)
#         assert_df("""
# cset
# 0     1
# 1     4
# 2     8
# 3    10
# """, df)

        ts = tsh.get(cn, 'growing')
        assert_df("""
2015-01-01    1.0
2015-01-02    1.0
2015-01-03    1.0
2015-01-04    1.0
2015-01-05    1.0
2015-01-06    1.0
2015-01-07    1.0
2015-01-08    1.0
2015-01-09    1.0
2015-01-10    1.0
""", ts)

        df = pd.read_sql("select id, chunkhead from growing order by id", cn)
        # keep only the byte size of each stored chunk head
        df['chunkhead'] = df['chunkhead'].apply(lambda x: 0 if x is None else len(x))

        assert_df("""
id  chunkhead
0   1         35
1   4         47
2   8         59
3  10         67
""", df)

    # table = tsh._get_ts_table(engine, 'growing')
    _, snap = Snapshot(engine, tsh, 'growing').find()
    assert (ts == snap).all()
    Snapshot._interval = baseinterval
822
def test_deletion(engine, tsh):
    """Deleting points by inserting nans, for numeric and string series."""
    ts_begin = genserie(datetime(2010, 1, 1), 'D', 11)
    ts_begin.iloc[-1] = np.nan
    tsh.insert(engine, ts_begin, 'ts_del', 'test')

    # the trailing nan never lands in the snapshot
    ts = Snapshot(engine, tsh, 'ts_del').build_upto()
    assert ts.iloc[-1] == 9.0

    ts_begin.iloc[0] = np.nan
    ts_begin.iloc[3] = np.nan

    tsh.insert(engine, ts_begin, 'ts_del', 'test')

    assert_df("""
2010-01-02    1.0
2010-01-03    2.0
2010-01-05    4.0
2010-01-06    5.0
2010-01-07    6.0
2010-01-08    7.0
2010-01-09    8.0
2010-01-10    9.0
""", tsh.get(engine, 'ts_del'))

    ts2 = tsh.get(engine, 'ts_del',
                 # force snapshot reconstruction feature
                 revision_date=datetime(2038, 1, 1))
    assert (tsh.get(engine, 'ts_del') == ts2).all()

    # resurrect the deleted points with new values
    ts_begin.iloc[0] = 42
    ts_begin.iloc[3] = 23

    tsh.insert(engine, ts_begin, 'ts_del', 'test')

    assert_df("""
2010-01-01    42.0
2010-01-02     1.0
2010-01-03     2.0
2010-01-04    23.0
2010-01-05     4.0
2010-01-06     5.0
2010-01-07     6.0
2010-01-08     7.0
2010-01-09     8.0
2010-01-10     9.0
""", tsh.get(engine, 'ts_del'))

    # now with string!

    ts_string = genserie(datetime(2010, 1, 1), 'D', 10, ['machin'])
    tsh.insert(engine, ts_string, 'ts_string_del', 'test')

    ts_string[4] = None
    ts_string[5] = None

    tsh.insert(engine, ts_string, 'ts_string_del', 'test')
    assert_df("""
2010-01-01    machin
2010-01-02    machin
2010-01-03    machin
2010-01-04    machin
2010-01-07    machin
2010-01-08    machin
2010-01-09    machin
2010-01-10    machin
""", tsh.get(engine, 'ts_string_del'))

    ts_string[4] = 'truc'
    ts_string[6] = 'truc'

    tsh.insert(engine, ts_string, 'ts_string_del', 'test')
    assert_df("""
2010-01-01    machin
2010-01-02    machin
2010-01-03    machin
2010-01-04    machin
2010-01-05      truc
2010-01-07      truc
2010-01-08    machin
2010-01-09    machin
2010-01-10    machin
""", tsh.get(engine, 'ts_string_del'))

    # erase the whole series
    ts_string[ts_string.index] = np.nan
    tsh.insert(engine, ts_string, 'ts_string_del', 'test')

    erased = tsh.get(engine, 'ts_string_del')
    assert len(erased) == 0

    # first insertion with only nan

    ts_begin = genserie(datetime(2010, 1, 1), 'D', 10, [np.nan])
    tsh.insert(engine, ts_begin, 'ts_null', 'test')

    assert tsh.get(engine, 'ts_null') is None

    # exhibit issue with nans handling
    ts_repushed = genserie(datetime(2010, 1, 1), 'D', 11)
    ts_repushed[0:3] = np.nan

    assert_df("""
2010-01-01     NaN
2010-01-02     NaN
2010-01-03     NaN
2010-01-04     3.0
2010-01-05     4.0
2010-01-06     5.0
2010-01-07     6.0
2010-01-08     7.0
2010-01-09     8.0
2010-01-10     9.0
2010-01-11    10.0
Freq: D
""", ts_repushed)

    tsh.insert(engine, ts_repushed, 'ts_repushed', 'test')
    diff = tsh.insert(engine, ts_repushed, 'ts_repushed', 'test')
    assert diff is None

    # there is no difference
    assert 0 == len(tsh.diff(ts_repushed, ts_repushed))

    ts_add = genserie(datetime(2010, 1, 1), 'D', 15)
    ts_add.iloc[0] = np.nan
    ts_add.iloc[13:] = np.nan
    ts_add.iloc[8] = np.nan
    diff = tsh.diff(ts_repushed, ts_add)

    assert_df("""
2010-01-02     1.0
2010-01-03     2.0
2010-01-09     NaN
2010-01-12    11.0
2010-01-13    12.0""", diff.sort_index())
    # value on nan => value
    # nan on value => nan
    # nan on nan => Nothing
    # nan on nothing=> Nothing

    # full erasing
    # numeric
    ts_begin = genserie(datetime(2010, 1, 1), 'D', 4)
    tsh.insert(engine, ts_begin, 'ts_full_del', 'test')

    ts_begin.iloc[:] = np.nan
    tsh.insert(engine, ts_begin, 'ts_full_del', 'test')

    ts_end = genserie(datetime(2010, 1, 1), 'D', 4)
    tsh.insert(engine, ts_end, 'ts_full_del', 'test')

    # string
    ts_begin = genserie(datetime(2010, 1, 1), 'D', 4, ['text'])
    tsh.insert(engine, ts_begin, 'ts_full_del_str', 'test')

    ts_begin.iloc[:] = np.nan
    tsh.insert(engine, ts_begin, 'ts_full_del_str', 'test')

    ts_end = genserie(datetime(2010, 1, 1), 'D', 4, ['text'])
    tsh.insert(engine, ts_end, 'ts_full_del_str', 'test')
984
def test_multi_index(engine, tsh):
    """Exercise insert/get round-trips for series carrying a 3-level
    MultiIndex (application date, publication date, insertion date).

    Checks that:
    * a re-insert of identical data yields a None diff (no-op),
    * a value change produces the expected stored state,
    * a partially-overlapping second insert produces a diff that skips
      unchanged points and accumulates new index entries.
    """
    # NOTE: pd.date_range replaces the removed
    # pd.DatetimeIndex(start=..., end=..., freq=...) constructor and
    # plain datetime replaces the removed pd.datetime alias; the
    # produced values are identical.
    appdate_0 = pd.date_range(start=datetime(2015, 1, 1),
                              end=datetime(2015, 1, 2),
                              freq='D').values
    pubdate_0 = [datetime(2015, 1, 11, 12, 0, 0)] * 2
    insertion_date_0 = [datetime(2015, 1, 11, 12, 30, 0)] * 2

    multi = [
        appdate_0,
        np.array(pubdate_0),
        np.array(insertion_date_0)
    ]

    ts_multi = pd.Series(range(2), index=multi)
    ts_multi.index.rename(['b', 'c', 'a'], inplace=True)

    tsh.insert(engine, ts_multi, 'ts_multi_simple', 'test')

    ts = tsh.get(engine, 'ts_multi_simple')
    assert_df("""
                                                    ts_multi_simple
a                   b          c                                   
2015-01-11 12:30:00 2015-01-01 2015-01-11 12:00:00              0.0
                    2015-01-02 2015-01-11 12:00:00              1.0
""", pd.DataFrame(ts))

    # identical re-insert: must be detected as a no-op
    diff = tsh.insert(engine, ts_multi, 'ts_multi_simple', 'test')
    assert diff is None

    ts_multi_2 = pd.Series([0, 2], index=multi)
    ts_multi_2.index.rename(['b', 'c', 'a'], inplace=True)

    tsh.insert(engine, ts_multi_2, 'ts_multi_simple', 'test')
    ts = tsh.get(engine, 'ts_multi_simple')

    assert_df("""
                                                    ts_multi_simple
a                   b          c                                   
2015-01-11 12:30:00 2015-01-01 2015-01-11 12:00:00              0.0
                    2015-01-02 2015-01-11 12:00:00              2.0
""", pd.DataFrame(ts))

    # bigger ts
    appdate_0 = pd.date_range(start=datetime(2015, 1, 1),
                              end=datetime(2015, 1, 4),
                              freq='D').values
    pubdate_0 = [datetime(2015, 1, 11, 12, 0, 0)] * 4
    insertion_date_0 = [datetime(2015, 1, 11, 12, 30, 0)] * 4

    appdate_1 = pd.date_range(start=datetime(2015, 1, 1),
                              end=datetime(2015, 1, 4),
                              freq='D').values

    pubdate_1 = [datetime(2015, 1, 21, 12, 0, 0)] * 4
    insertion_date_1 = [datetime(2015, 1, 21, 12, 30, 0)] * 4

    multi = [
        np.concatenate([appdate_0, appdate_1]),
        np.array(pubdate_0 + pubdate_1),
        np.array(insertion_date_0 + insertion_date_1)
    ]

    ts_multi = pd.Series(range(8), index=multi)
    ts_multi.index.rename(['a', 'c', 'b'], inplace=True)

    tsh.insert(engine, ts_multi, 'ts_multi', 'test')
    ts = tsh.get(engine, 'ts_multi')

    assert_df("""
                                                    ts_multi
a          b                   c                            
2015-01-01 2015-01-11 12:30:00 2015-01-11 12:00:00       0.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
2015-01-02 2015-01-11 12:30:00 2015-01-11 12:00:00       1.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       5.0
2015-01-03 2015-01-11 12:30:00 2015-01-11 12:00:00       2.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       6.0
2015-01-04 2015-01-11 12:30:00 2015-01-11 12:00:00       3.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       7.0
    """, pd.DataFrame(ts.sort_index()))
    # Note: the columns are returned according to the alphabetic order

    appdate_2 = pd.date_range(start=datetime(2015, 1, 1),
                              end=datetime(2015, 1, 4),
                              freq='D').values
    pubdate_2 = [datetime(2015, 1, 31, 12, 0, 0)] * 4
    insertion_date_2 = [datetime(2015, 1, 31, 12, 30, 0)] * 4

    multi_2 = [
        np.concatenate([appdate_1, appdate_2]),
        np.array(pubdate_1 + pubdate_2),
        np.array(insertion_date_1 + insertion_date_2)
    ]

    ts_multi_2 = pd.Series([4] * 8, index=multi_2)
    ts_multi_2.index.rename(['a', 'c', 'b'], inplace=True)

    # A second ts is inserted with some index in common with the first
    # one: appdate_1, pubdate_1,and insertion_date_1. The value is set
    # at 4, which matches the previous value of the "2015-01-01" point.

    diff = tsh.insert(engine, ts_multi_2, 'ts_multi', 'test')
    assert_df("""
                                                    ts_multi
a          b                   c                            
2015-01-01 2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
2015-01-02 2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
2015-01-03 2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
2015-01-04 2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
        """, pd.DataFrame(diff.sort_index()))
    # the differential skips a value for "2015-01-01"
    # which does not change from the previous ts

    ts = tsh.get(engine, 'ts_multi')
    assert_df("""
                                                    ts_multi
a          b                   c                            
2015-01-01 2015-01-11 12:30:00 2015-01-11 12:00:00       0.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
2015-01-02 2015-01-11 12:30:00 2015-01-11 12:00:00       1.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
2015-01-03 2015-01-11 12:30:00 2015-01-11 12:00:00       2.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
2015-01-04 2015-01-11 12:30:00 2015-01-11 12:00:00       3.0
           2015-01-21 12:30:00 2015-01-21 12:00:00       4.0
           2015-01-31 12:30:00 2015-01-31 12:00:00       4.0
        """, pd.DataFrame(ts.sort_index()))

    # the result ts have now 3 values for each point in 'a'
def test_multi_index_get_history(engine, tsh):
    appdate = pd.DatetimeIndex(
        start=datetime(2015, 1, 1),
        end=datetime(2015, 1, 2),
        freq='D'
    ).values
    forecast_date = [pd.Timestamp(2015, 1, 11, 12, 0, 0)] * 2
    multi = [
        appdate,
        np.array(forecast_date),
    ]

    ts_multi = pd.Series(range(2), index=multi)
    ts_multi.index.rename(['app_date', 'fc_date'], inplace=True)

    tsh.insert(engine, ts_multi, 'ts_mi', 'Babar',
1137
               _insertion_date=utcdt(2015, 1, 11, 12, 30, 0))
1138
1139
1140
1141


    ts = tsh.get_history(engine, 'ts_mi')
    assert_df("""
1142
1143
1144
insertion_date             app_date    fc_date            
2015-01-11 12:30:00+00:00  2015-01-01  2015-01-11 12:00:00    0.0
                           2015-01-02  2015-01-11 12:00:00    1.0
1145
1146
1147
1148
1149
""", ts)

    ts = tsh.get_history(engine, 'ts_mi', diffmode=True