tsio.py 25.2 KB
Newer Older
1
from datetime import datetime
2
import logging
3
4
import hashlib
import uuid
5
6
import json
from pathlib import Path
7
8
9

import pandas as pd

10
from deprecated import deprecated
11
from sqlhelp import sqlfile, select, insert
12

13
from tshistory.util import (
14
    bisect_search,
15
    closed_overlaps,
16
    num2float,
17
    pruned_history,
18
    SeriesServices,
19
    start_end,
20
    tx,
21
22
    tzaware_serie
)
23
from tshistory.snapshot import Snapshot
24

25
L = logging.getLogger('tshistory.tsio')
26
SERIESSCHEMA = Path(__file__).parent / 'series.sql'
27
28


29
class timeseries(SeriesServices):
    """Postgres-backed, versioned time series store.

    Each series is materialized as a pair of tables (revision +
    snapshot) under the instance namespace; the `registry` table maps
    series names to their table name and metadata.
    """
    namespace = 'tsh'
    schema = None

    # metadata keys reserved for internal bookkeeping: they describe
    # the physical representation of a series and cannot be set by
    # users through `update_metadata`
    metakeys = {
        'tzaware',
        'index_type',
        'index_dtype',
        'value_dtype',
        'value_type'
    }
    create_lock_id = None
    delete_lock_id = None

    def __init__(self, namespace='tsh'):
        self.namespace = namespace
        # advisory lock ids derive from the namespace so that all
        # timeseries objects on the same namespace serialize their
        # create/delete operations against each other
        self.create_lock_id = sum(ord(c) for c in namespace)
        self.delete_lock_id = sum(ord(c) for c in namespace)

    @tx
    def insert(self, cn, newts, name, author,
               metadata=None,
               insertion_date=None):
        """Create a new revision of a given time series

        newts: pandas.Series with date index
        name: str unique identifier of the serie
        author: str free-form author name
        metadata: optional dict for changeset metadata
        """
        if not len(newts):
            return

        newts = self._guard_insert(
            newts, name, author, metadata,
            insertion_date
        )

        assert ('<M8[ns]' == newts.index.dtype or
                'datetime' in str(newts.index.dtype) and not
                isinstance(newts.index, pd.MultiIndex))

        newts.name = name
        tablename = self._series_to_tablename(cn, name)

        if tablename is None:
            # first insertion: the series tables do not exist yet
            seriesmeta = self._series_initial_meta(cn, name, newts)
            return self._create(cn, newts, name, author, seriesmeta,
                                metadata, insertion_date)

        return self._update(cn, tablename, newts, name, author,
                            metadata, insertion_date)

    def list_series(self, cn):
        """Return the mapping of all series to their type"""
        sql = f'select seriesname from "{self.namespace}".registry '
        return {
            row.seriesname: 'primary'
            for row in cn.execute(sql)
        }

    @tx
    def get(self, cn, name, revision_date=None,
            from_value_date=None, to_value_date=None,
            _keep_nans=False):
        """Compute and return the serie of a given name

        revision_date: datetime filter to get previous versions of the
        serie
        """
        if not self.exists(cn, name):
            return

        csetfilter = []
        if revision_date:
            csetfilter.append(
                lambda q: q.where(
                    'insertion_date <= %(idate)s', idate=revision_date
                )
            )
        snap = Snapshot(cn, self, name)
        _, current = snap.find(csetfilter=csetfilter,
                               from_value_date=from_value_date,
                               to_value_date=to_value_date)

        if current is not None and not _keep_nans:
            current.name = name
            current = current.dropna()
        return current

    @tx
    def metadata(self, cn, name):
        """Return metadata dict of timeserie."""
        sql = (f'select metadata from "{self.namespace}".registry '
               'where seriesname = %(name)s')
        meta = cn.execute(sql, name=name).scalar()
        return meta

    @tx
    def update_metadata(self, cn, name, metadata, internal=False):
        """Merge `metadata` into the registry entry of `name`.

        Unless `internal` is set, keys from `metakeys` are rejected.
        """
        assert isinstance(metadata, dict)
        assert internal or not set(metadata.keys()) & self.metakeys
        meta = self.metadata(cn, name)
        # remove all but internal stuff
        newmeta = {
            key: meta[key]
            for key in self.metakeys
            if meta.get(key) is not None
        }
        newmeta.update(metadata)
        sql = (f'update "{self.namespace}".registry as reg '
               'set metadata = %(metadata)s '
               'where reg.seriesname = %(seriesname)s')
        cn.execute(
            sql,
            metadata=json.dumps(newmeta),
            seriesname=name
        )

    def changeset_metadata(self, cn, csid):
        """Return the metadata of changeset `csid` (an int)."""
        assert isinstance(csid, int)
        q = select(
            'metadata'
        ).table(
            f'"{self.namespace}".changeset'
        ).where(
            'id = %(csid)s', csid=csid
        )
        return q.do(cn).scalar()

    def type(self, cn, name):
        # this store only hosts primary series
        return 'primary'

    @tx
    def history(self, cn, name,
                from_insertion_date=None,
                to_insertion_date=None,
                from_value_date=None,
                to_value_date=None,
                diffmode=False,
                _keep_nans=False):
        """Return a {insertion_date: series} mapping for the revisions
        of `name`, optionally bounded on both axes.

        With `diffmode`, each entry is the diff against the previous
        revision rather than the full series.
        """
        tablename = self._series_to_tablename(cn, name)
        if tablename is None:
            return

        revs = self._revisions(
            cn, name,
            from_insertion_date,
            to_insertion_date,
            from_value_date,
            to_value_date
        )

        if not revs:
            return {}

        if diffmode:
            # compute the previous serie value
            first_csid = revs[0][0]
            previous_csid = self._previous_cset(cn, name, first_csid)
            revs.insert(0, (previous_csid, None))

        snapshot = Snapshot(cn, self, name)
        series = snapshot.findall(
            revs,
            from_value_date,
            to_value_date
        )

        if diffmode:
            diffs = []
            for (_revdate_a, serie_a), (revdate_b, serie_b) in zip(series, series[1:]):
                if serie_a is None:
                    # when we scan the entirety of the history: there exists no "previous" serie
                    # we therefore consider the first serie as a diff to the "null" serie
                    diffs.append((revdate_b, serie_b))
                else:
                    diff = self.diff(serie_a, serie_b)
                    if len(diff):
                        diffs.append((revdate_b, diff))
            series = diffs
        else:
            series = [
                (idate, ts if _keep_nans else ts.dropna())
                for idate, ts in series
            ]

        hist = dict(series)

        if from_value_date or to_value_date:
            # now it's possible that the extremities cut
            # yields similar series for successive idates
            # and we are not interested in that
            hist = pruned_history(hist)

        return hist

    @tx
    def staircase(self, cn, name, delta,
                  from_value_date=None,
                  to_value_date=None):
        """ compute a series whose value dates are bounded to be
        `delta` time after the insertion dates and where we
        keep the most recent ones
        """
        if not self.exists(cn, name):
            return

        base = self.get(
            cn, name,
            from_value_date=from_value_date,
            to_value_date=to_value_date,
            _keep_nans=True
        )
        if not len(base):
            return pd.Series(name=name)

        # prepare the needed revision dates
        fromidate = base.index.min() - delta
        toidate = base.index.max() - delta

        hcache = historycache(
            self, cn, name,
            from_value_date=from_value_date,
            to_value_date=to_value_date,
            to_insertion_date=toidate,
            tzaware=self.metadata(cn, name).get('tzaware')
        )

        return hcache.staircase(
            delta,
            from_value_date,
            to_value_date
        )

    @tx
    def exists(self, cn, name):
        return self._series_to_tablename(cn, name) is not None

    @tx
    def latest_insertion_date(self, cn, name):
        """Return the most recent insertion date as a UTC timestamp,
        or None for an empty revision table.
        """
        tablename = self._series_to_tablename(cn, name)
        q = select('max(insertion_date)').table(
            f'"{self.namespace}.revision"."{tablename}"'
        )
        idate = pd.Timestamp(
            q.do(cn).scalar()
        )
        if not pd.isnull(idate):
            return idate.astimezone('UTC')

    @tx
    def insertion_dates(self, cn, name,
                        fromdate=None, todate=None):
        """Return the sorted list of insertion dates of `name`."""
        revs = self._revisions(
            cn, name,
            from_insertion_date=fromdate,
            to_insertion_date=todate
        )
        return [
            idate
            for _cset, idate in revs
        ]

    @tx
    def last_id(self, cn, name):
        snapshot = Snapshot(cn, self, name)
        return snapshot.last_id()

    @tx
    def changeset_at(self, cn, name, revdate, mode='strict'):
        """Return the changeset id at `revdate`.

        `mode` selects exact match ('strict') or the nearest changeset
        'before'/'after' the given date.
        """
        operators = {
            'strict': '=',
            'before': '<=',
            'after': '>='
        }
        tablename = self._series_to_tablename(cn, name)
        assert mode in operators
        q = select(
            'id'
        ).table(
            f'"{self.namespace}.revision"."{tablename}"',
        ).where(
            f'insertion_date {operators[mode]} %(revdate)s',
            revdate=revdate
        )
        return q.do(cn).scalar()

    @tx
    def rename(self, cn, oldname, newname):
        sql = (f'update "{self.namespace}".registry '
               'set seriesname = %(newname)s '
               'where seriesname = %(oldname)s')
        cn.execute(sql, oldname=oldname, newname=newname)

    @tx
    def delete(self, cn, name):
        """Drop a series and all its revisions."""
        tablename = self._series_to_tablename(cn, name)
        if tablename is None:
            # use the module logger rather than print for diagnostics
            L.info('not deleting unknown series %s %s', name, self.namespace)
            return
        # serialize all deletions to avoid deadlocks
        cn.execute(
            f'select pg_advisory_xact_lock({self.delete_lock_id})'
        )
        rid, tablename = cn.execute(
            f'select id, tablename from "{self.namespace}".registry '
            'where seriesname = %(seriesname)s',
            seriesname=name
        ).fetchone()
        # drop series tables
        cn.execute(
            f'drop table "{self.namespace}.revision"."{tablename}" cascade'
        )
        cn.execute(
            f'drop table "{self.namespace}.snapshot"."{tablename}" cascade'
        )
        cn.execute(f'delete from "{self.namespace}".registry '
                   'where id = %(rid)s',
                   rid=rid)

    @tx
    def strip(self, cn, name, csid):
        """Erase all revisions of `name` starting at changeset `csid`."""
        # wipe the diffs
        tablename = self._series_to_tablename(cn, name)
        sql = (f'delete from "{self.namespace}.revision"."{tablename}" '
               'where id >= %(csid)s')
        cn.execute(sql, csid=csid)
        snapshot = Snapshot(cn, self, name)
        snapshot.reclaim()

    def info(self, cn):
        """Gather global statistics on the current tshistory repository
        """
        sql = f'select count(*) from "{self.namespace}".registry'
        stats = {'series count': cn.execute(sql).scalar()}
        sql = (f'select distinct seriesname from "{self.namespace}".registry '
               'order by seriesname')
        stats['serie names'] = [row for row, in cn.execute(sql).fetchall()]
        return stats

    @tx
    def log(self, cn, name, limit=0, authors=None,
            fromrev=None, torev=None,
            fromdate=None, todate=None):
        """Build a structure showing the history of a series in the db,
        per changeset, in chronological order.

        NOTE(review): `limit`, `fromrev` and `torev` are accepted for
        interface compatibility but currently unused.
        """
        log = []
        q = self._log_series_query(
            cn, name, authors,
            fromdate, todate
        )
        rset = q.do(cn)
        for csetid, author, revdate, meta in rset.fetchall():
            log.append({'rev': csetid, 'author': author,
                        'date': pd.Timestamp(revdate).tz_convert('utc'),
                        'meta': json.loads(meta) if meta else {}})

        log.sort(key=lambda rev: rev['rev'])
        return log

    def _log_series_query(self, cn, name,
                          authors=None,
                          fromdate=None, todate=None):
        """Build the revision-log query with the optional filters."""
        tablename = self._series_to_tablename(cn, name)
        q = select(
            'id', 'author', 'insertion_date', 'metadata',
            opt='distinct'
        ).table(
            f'"{self.namespace}.revision"."{tablename}"'
        )

        if authors:
            # bugfix: the bind name must match the `%(authors)s`
            # placeholder (was bound as `author`)
            q.where(
                'author in %(authors)s',
                authors=tuple(authors)
            )
        if fromdate:
            q.where('insertion_date >= %(fromdate)s', fromdate=fromdate)
        if todate:
            q.where('insertion_date <= %(todate)s', todate=todate)

        q.order('id', 'desc')
        return q

    @tx
    def interval(self, cn, name, notz=False):
        """Return the [start, end] value-date interval of the series
        as recorded by its latest revision.
        """
        tablename = self._series_to_tablename(cn, name)
        if tablename is None:
            raise ValueError(f'no such serie: {name}')
        sql = (f'select tsstart, tsend '
               f'from "{self.namespace}.revision"."{tablename}" '
               f'order by id desc limit 1')
        res = cn.execute(sql).fetchone()
        start, end = res.tsstart, res.tsend
        tz = None
        if self.metadata(cn, name).get('tzaware') and not notz:
            tz = 'UTC'
        start, end = pd.Timestamp(start, tz=tz), pd.Timestamp(end, tz=tz)
        return pd.Interval(left=start, right=end, closed='both')

    # /API
    # Helpers

    # creation / update

    def _guard_insert(self, newts, name, author, metadata, insertion_date):
        """Validate and normalize an incoming series before insertion."""
        assert isinstance(name, str), 'Name not a string'
        assert isinstance(author, str), 'Author not a string'
        assert metadata is None or isinstance(metadata, dict), 'Bad format for metadata'
        assert (insertion_date is None or
                isinstance(insertion_date, datetime)), 'Bad format for insertion date'
        assert isinstance(newts, pd.Series), 'Not a pd.Series'
        index = newts.index
        assert not index.duplicated().any(), 'There are some duplicates in the index'

        assert index.notna().all(), 'The index contains NaT entries'
        if not index.is_monotonic_increasing:
            newts = newts.sort_index()
            # bugfix: rebind to the *sorted* index -- converting the
            # stale unsorted index and assigning it back would scramble
            # the index/value correspondence
            index = newts.index
        if index.tz is not None:
            newts.index = index.tz_convert('UTC')

        return num2float(newts)

    def _create(self, cn, newts, name, author, seriesmeta,
                metadata=None, insertion_date=None):
        """First insertion: create tables, register the series and
        store the initial revision.
        """
        start, end = start_end(newts, notz=False)
        if start is None:
            assert end is None
            # this is just full of nans
            return None
        # chop off unwanted nans
        newts = newts.loc[start:end]
        if len(newts) == 0:
            return None

        # at creation time we take an exclusive lock to avoid
        # a deadlock on created tables against the changeset-series fk
        cn.execute(
            f'select pg_advisory_xact_lock({self.create_lock_id})'
        )

        # NOTE: metadata is passed as a plain dict; _new_revision does
        # the json serialization (serializing here too would double-encode)

        self._make_ts_table(cn, name)
        self._register_serie(cn, name, seriesmeta)

        snapshot = Snapshot(cn, self, name)
        head = snapshot.create(newts)
        start, end = start_end(newts)

        self._new_revision(
            cn, name, head, start, end,
            author, insertion_date, metadata
        )

        L.info('first insertion of %s (size=%s) by %s',
               name, len(newts), author)
        return newts

    def _update(self, cn, tablename, newts, name, author,
                metadata=None, insertion_date=None):
        """Subsequent insertion: store only the diff against the
        current state of the series.
        """
        self._validate(cn, newts, name)
        snapshot = Snapshot(cn, self, name)
        diff = self.diff(snapshot.last(newts.index.min(),
                                       newts.index.max()),
                         newts)
        if not len(diff):
            L.info('no difference in %s by %s (for ts of size %s)',
                   name, author, len(newts))
            return

        # compute series start/end stamps
        tsstart, tsend = start_end(newts)
        ival = self.interval(cn, name, notz=True)
        start = min(tsstart or ival.left, ival.left)
        end = max(tsend or ival.right, ival.right)

        if pd.isnull(diff[0]) or pd.isnull(diff[-1]):
            # we *might* be shrinking, let's look at the full series
            # and yes, shrinkers have a slow path
            last = snapshot.last()
            patched = self.patch(last, diff).dropna()
            if not len(patched):
                raise ValueError('complete erasure of a series is forbidden')
            if pd.isnull(diff[0]):
                start = patched.index[0]
            if pd.isnull(diff[-1]):
                end = patched.index[-1]

        head = snapshot.update(diff)
        self._new_revision(
            cn, name, head, start, end,
            author, insertion_date, metadata
        )
        L.info('inserted diff (size=%s) for ts %s by %s',
               len(diff), name, author)
        return diff

    def _new_revision(self, cn, name, head, tsstart, tsend,
                      author, insertion_date, metadata):
        """Append a row to the series revision table."""
        tablename = self._series_to_tablename(cn, name)
        if insertion_date is not None:
            # a forced insertion date must be timezone aware
            assert insertion_date.tzinfo is not None
            idate = pd.Timestamp(insertion_date)
        else:
            idate = pd.Timestamp(datetime.utcnow(), tz='UTC')
        # insertion dates must be strictly increasing
        latest_idate = self.latest_insertion_date(cn, name)
        if latest_idate:
            assert idate > latest_idate
        if metadata:
            metadata = json.dumps(metadata)

        q = insert(
            f'"{self.namespace}.revision"."{tablename}" '
        ).values(
            snapshot=head,
            tsstart=tsstart,
            tsend=tsend,
            author=author,
            insertion_date=idate,
            metadata=metadata
        )
        q.do(cn)

    # serie table handling

    def _make_tablename(self, cn, name):
        """ compute the unqualified (no namespace) table name
        from a serie name, to allow arbitrary serie names
        """
        # default
        tablename = name
        # postgresql table names are limited to 63 chars.
        if len(name) > 63:
            tablename = hashlib.sha1(name.encode('utf-8')).hexdigest()

        # collision detection (collision can happen after a rename)
        if cn.execute(f'select tablename '
                      f'from "{self.namespace}".registry '
                      f'where tablename = %(seriesname)s',
                      seriesname=name).scalar():
            tablename = str(uuid.uuid4())

        cn.cache['series_tablename'][name] = tablename
        return tablename

    def _series_to_tablename(self, cn, name):
        """Map a series name to its table name, using the connection
        cache to avoid repeated registry lookups.
        """
        tablename = cn.cache['series_tablename'].get(name)
        if tablename is not None:
            return tablename

        tablename = cn.execute(
            f'select tablename from "{self.namespace}".registry '
            f'where seriesname = %(seriesname)s',
            seriesname=name
        ).scalar()
        if tablename is None:
            # bogus series name
            return
        cn.cache['series_tablename'][name] = tablename
        return tablename

    def _make_ts_table(self, cn, name):
        """Create the revision table for a new series from the sql
        schema template.
        """
        tablename = self._make_tablename(cn, name)
        table = sqlfile(
            SERIESSCHEMA,
            namespace=self.namespace,
            tablename=tablename
        )
        cn.execute(table)

    def _series_initial_meta(self, cn, name, ts):
        """Compute the internal metadata describing the physical
        representation of a new series.
        """
        index = ts.index
        return {
            'tzaware': tzaware_serie(ts),
            'index_type': index.dtype.name,
            'index_dtype': index.dtype.str,
            'value_dtype': ts.dtypes.str,
            'value_type': ts.dtypes.name
        }

    def _register_serie(self, cn, name, seriesmeta):
        """Insert the series into the registry; return its registry id."""
        sql = (f'insert into "{self.namespace}".registry '
               '(seriesname, tablename, metadata) '
               'values (%s, %s, %s) '
               'returning id')
        tablename = self._series_to_tablename(cn, name)
        regid = cn.execute(
            sql,
            name,
            tablename,
            json.dumps(seriesmeta)
        ).scalar()
        return regid

    # changeset handling

    def _previous_cset(self, cn, name, csid):
        """Return the changeset id immediately preceding `csid`."""
        tablename = self._series_to_tablename(cn, name)
        sql = (f'select id from "{self.namespace}.revision"."{tablename}" '
               'where id < %(csid)s '
               'order by id desc limit 1')
        return cn.execute(sql, csid=csid).scalar()

    # insertion handling

    def _validate(self, cn, ts, name):
        """Reject insertions whose dtypes disagree with the series'
        registered metadata.
        """
        if ts.isnull().all():
            # ts erasure
            return
        tstype = ts.dtype
        meta = self.metadata(cn, name)
        if tstype != meta['value_type']:
            m = (f'Type error when inserting {name}, '
                 f'new type is {tstype}, type in base is {meta["value_type"]}')
            raise Exception(m)
        if ts.index.dtype.name != meta['index_type']:
            raise Exception(
                'Incompatible index types: '
                f'ref=`{meta["index_type"]}`, new=`{ts.index.dtype.name}`'
            )

    def _revisions(self, cn, name,
                   from_insertion_date=None,
                   to_insertion_date=None,
                   from_value_date=None,
                   to_value_date=None):
        """Return the [(csid, insertion_date)] list of revisions of
        `name`, optionally bounded on both axes, ordered by id.
        """
        tablename = self._series_to_tablename(cn, name)
        q = select(
            'id', 'insertion_date'
        ).table(
            f'"{self.namespace}.revision"."{tablename}"'
        )

        if from_insertion_date:
            q.where(
                'insertion_date >= %(from_idate)s',
                from_idate=from_insertion_date
            )
        if to_insertion_date:
            q.where(
                'insertion_date <= %(to_idate)s ',
                to_idate=to_insertion_date
            )

        if from_value_date or to_value_date:
            q.where(
                closed_overlaps(from_value_date, to_value_date),
                fromdate=from_value_date,
                todate=to_value_date
            )

        q.order('id')
        return [
            (csid, pd.Timestamp(idate).astimezone('UTC'))
            for csid, idate in q.do(cn).fetchall()
        ]

691

692
693
694
695
class historycache:
    """In-memory view over the revision history of one series.

    All revisions are fetched once at construction time so that
    subsequent `get`/`staircase` calls need no database access.
    """

    def __init__(self, tsh, cn, name,
                 from_value_date=None,
                 to_value_date=None,
                 from_insertion_date=None,
                 to_insertion_date=None,
                 tzaware=True):
        self.name = name
        self.tzaware = tzaware
        self.hist = tsh.history(
            cn, name,
            from_value_date=from_value_date,
            to_value_date=to_value_date,
            from_insertion_date=from_insertion_date,
            to_insertion_date=to_insertion_date,
            _keep_nans=True
        )
        # insertion dates, both as-is and stripped of their tzinfo,
        # so lookups work whether the caller passes aware or naive stamps
        self.idates = list(self.hist)
        self.naive_idates = [
            stamp.replace(tzinfo=None)
            for stamp in self.idates
        ]

    def _find_nearest_idate(self, revision_date):
        # pick the comparison domain matching the series' tz-awareness
        candidates = self.idates if self.tzaware else self.naive_idates
        pos = bisect_search(candidates, revision_date)
        if pos == -1:
            # revision_date predates the first insertion
            return None
        # clamp a past-the-end hit onto the last insertion date
        pos = min(pos, len(candidates) - 1)
        return self.idates[pos]

    def get(self, revision_date=None,
            from_value_date=None,
            to_value_date=None):
        """Return the series as known at `revision_date` (latest by
        default), cut to the value-date bounds, nans dropped.
        """
        if not self.hist:
            return pd.Series(name=self.name)

        if revision_date is None:
            # most recent revision wins
            latest = next(reversed(self.hist.values()))
            return latest.dropna()

        idate = self._find_nearest_idate(revision_date)
        if idate:
            chunk = self.hist[idate]
            return chunk.loc[from_value_date:to_value_date].dropna()

        return pd.Series(name=self.name)

    def staircase(self, delta,
                  from_value_date=None,
                  to_value_date=None):
        """ compute a series whose value dates are bounded to be
        `delta` time after the insertion dates and where we
        keep the most recent ones
        """
        base = self.get(
            from_value_date=from_value_date,
            to_value_date=to_value_date
        )
        if not len(base):
            return base

        chunks = []
        for vdate in base.index:
            point = self.get(
                revision_date=vdate - delta,
                from_value_date=vdate,
                to_value_date=vdate
            )
            if point is not None and len(point):
                chunks.append(point)

        out = pd.concat(chunks) if chunks else pd.Series()
        out.name = self.name
        return out

776
777
778
779
780
781

# backward-compatibility shim: the legacy class name and method names
# still work but emit a DeprecationWarning, delegating to `timeseries`
@deprecated(reason='use the `timeseries` object instead')
class TimeSerie(timeseries):

    # old entry points mapped onto the renamed API
    get_history = deprecated(timeseries.history)
    get_delta = deprecated(timeseries.staircase)