tsio.py 26.4 KB
Newer Older
1
from datetime import datetime
2
import logging
3
4
import hashlib
import uuid
5
from threading import Lock
6
7
8

import pandas as pd

9
10
from sqlalchemy import Table, Column, Integer, ForeignKey, Index
from sqlalchemy.sql.elements import NONE_NAME
Aurélien Campéas's avatar
Aurélien Campéas committed
11
from sqlalchemy.sql.expression import select, func, desc
Aurélien Campéas's avatar
Aurélien Campéas committed
12
from sqlalchemy.dialects.postgresql import TIMESTAMP
13

14
from tshistory.schema import tsschema
15
from tshistory.util import (
16
    closed_overlaps,
17
18
    num2float,
    SeriesServices,
19
    start_end,
20
    tx,
21
22
    tzaware_serie
)
23
from tshistory.snapshot import Snapshot, TABLES as SNAPTABLES
24

25
L = logging.getLogger('tshistory.tsio')
26
TABLES = {}
27
28


29
class TimeSerie(SeriesServices):
    """Versioned, postgres-backed time series store.

    Each series lives in its own table (plus a snapshot table); every
    insertion creates a changeset recording author/date/metadata.
    """
    namespace = 'tsh'
    schema = None
    metadatacache = None
    # metadata keys that are managed internally and may not be
    # overwritten through the public update_metadata api
    metakeys = {
        'tzaware',
        'index_type',
        'index_names',
        'index_dtype',
        'value_dtype',
        'value_type'
    }
    registry_map = None
    serie_tablename = None
    create_lock_id = None
    # guards the process-level caches (TABLES, SNAPTABLES, ...)
    cachelock = Lock()
46
47

    def __init__(self, namespace='tsh'):
        """Prepare a series handler bound to the given schema namespace."""
        self.namespace = namespace
        self.schema = tsschema(namespace)
        self.schema.define()
        # per-instance caches
        self.metadatacache = {}
        self.registry_map = {}
        self.serie_tablename = {}
        # namespace-derived key for the pg advisory lock taken at creation
        self.create_lock_id = sum(map(ord, namespace))
54

55
    @tx
    def insert(self, cn, newts, seriename, author,
               metadata=None,
               _insertion_date=None):
        """Create a new revision of a given time series.

        newts: pandas.Series with date index
        seriename: str unique identifier of the serie
        author: str free-form author name
        metadata: optional dict for changeset metadata
        """
        # input validation (order of checks is significant for the
        # error messages callers/tests observe)
        assert isinstance(newts, pd.Series), 'Not a pd.Series'
        assert isinstance(seriename, str), 'Name not a string'
        assert isinstance(author, str), 'Author not a string'
        assert metadata is None or isinstance(metadata, dict), 'Bad format for metadata'
        assert (_insertion_date is None or
                isinstance(_insertion_date, datetime)), 'Bad format for insertion date'
        assert not newts.index.duplicated().any(), 'There are some duplicates in the index'
        assert newts.index.notna().all(), 'The index contains NaT entries'

        if not newts.index.is_monotonic_increasing:
            newts = newts.sort_index()

        newts = num2float(newts)
        if len(newts) == 0:
            return

        # the index must be a (non-multi) datetime index
        assert ('<M8[ns]' == newts.index.dtype or
                'datetime' in str(newts.index.dtype) and not
                isinstance(newts.index, pd.MultiIndex))

        newts.name = seriename
        table = self._get_ts_table(cn, seriename)

        if table is None:
            # first revision ever for this series
            return self._create(cn, newts, seriename, author,
                                metadata, _insertion_date)

        return self._update(cn, table, newts, seriename, author,
                            metadata, _insertion_date)
96

97
    def get(self, cn, seriename, revision_date=None,
            from_value_date=None, to_value_date=None,
            _keep_nans=False):
        """Compute and return the series of a given name.

        revision_date: show the series as it was known at that point in
        time (later revisions are ignored).
        """
        if not self.exists(cn, seriename):
            return

        csetfilter = []
        if revision_date:
            csetfilter.append(
                lambda cset: cset.c.insertion_date <= revision_date
            )
        snapshot = Snapshot(cn, self, seriename)
        _, current = snapshot.find(
            csetfilter=csetfilter,
            from_value_date=from_value_date,
            to_value_date=to_value_date
        )

        if current is None or _keep_nans:
            return current
        current.name = seriename
        # stored nans encode erasures: hide them from the caller
        return current[~current.isnull()]
121

122
    def metadata(self, cn, seriename):
        """Return (and cache) the metadata dict of a series."""
        try:
            return self.metadatacache[seriename]
        except KeyError:
            pass
        reg = self.schema.registry
        meta = cn.execute(
            select([reg.c.metadata]).where(reg.c.seriename == seriename)
        ).scalar()
        if meta is not None:
            self.metadatacache[seriename] = meta
        return meta

135
    @tx
    def update_metadata(self, cn, seriename, metadata, internal=False):
        """Merge `metadata` into the stored metadata of a series.

        Internal keys (see `metakeys`) are preserved and may only be
        touched when `internal` is true.
        """
        assert isinstance(metadata, dict)
        assert internal or not set(metadata.keys()) & self.metakeys
        meta = self.metadata(cn, seriename)
        # remove all but internal stuff
        newmeta = {
            key: meta[key]
            for key in self.metakeys
            if meta.get(key) is not None
        }
        newmeta.update(metadata)
        reg = self.schema.registry
        sql = reg.update().where(
            reg.c.seriename == seriename
        ).values(metadata=newmeta)
        # the cache entry may be absent (e.g. stored metadata was null,
        # so self.metadata did not populate it): don't raise KeyError
        self.metadatacache.pop(seriename, None)
        cn.execute(sql)

154
155
156
157
158
159
160
    def changeset_metadata(self, cn, csid):
        """Return the metadata dict of changeset `csid` (or None)."""
        # bind csid as a query parameter rather than interpolating it
        # into the sql string
        sql = 'select metadata from "{ns}".changeset where id = %(csid)s'.format(
            ns=self.namespace
        )
        return cn.execute(sql, csid=csid).scalar()

161
162
163
    def type(self, cn, name):
        """Every series handled by this class is a primary (stored) series."""
        return 'primary'

164
    @tx
    def get_history(self, cn, seriename,
                    from_insertion_date=None,
                    to_insertion_date=None,
                    from_value_date=None,
                    to_value_date=None,
                    deltabefore=None,
                    deltaafter=None,
                    diffmode=False,
                    _keep_nans=False):
        """Return {insertion_date (UTC) -> series} for each revision.

        from/to_insertion_date: bound the revisions considered
        from/to_value_date: bound the value dates returned
        deltabefore/deltaafter: per-revision value-date window around
        the insertion date
        diffmode: return per-revision diffs instead of full series
        """
        table = self._get_ts_table(cn, seriename)
        if table is None:
            # unknown series
            return

        cset = self.schema.changeset
        # all changesets of this series, oldest first
        revsql = select(
            [cset.c.id, cset.c.insertion_date]
        ).order_by(
            cset.c.id
        ).where(
            table.c.cset == cset.c.id
        )

        if from_insertion_date:
            revsql = revsql.where(cset.c.insertion_date >= from_insertion_date)
        if to_insertion_date:
            revsql = revsql.where(cset.c.insertion_date <= to_insertion_date)

        if from_value_date or to_value_date:
            # only keep revisions whose [start, end] overlaps the
            # requested value-date window (bound params below)
            revsql = revsql.where(
                closed_overlaps(from_value_date, to_value_date)
            )

        revs = cn.execute(
            revsql,
            {'fromdate': from_value_date, 'todate': to_value_date}
        ).fetchall()
        if not revs:
            return {}

        if diffmode:
            # compute the previous serie value
            # (may be None when the first selected revision is the
            # very first revision of the series)
            first_csid = revs[0][0]
            previous_csid = self._previous_cset(cn, seriename, first_csid)
            revs.insert(0, (previous_csid, None))

        snapshot = Snapshot(cn, self, seriename)
        series = []
        if (deltabefore, deltaafter) != (None, None):
            # delta mode: one snapshot lookup per revision, each with
            # its own value-date window centered on the insertion date
            for csid, idate in revs:
                from_date = None
                to_date = None
                if deltabefore is not None:
                    from_date = idate - deltabefore
                if deltaafter is not None:
                    to_date = idate + deltaafter
                # NOTE: the lambda closes over csid but is consumed
                # immediately by .find, so late binding is harmless here
                series.append((
                    idate,
                    snapshot.find(csetfilter=[lambda cset: cset.c.id == csid],
                                  from_value_date=from_date,
                                  to_value_date=to_date)[1]
                ))
        else:
            # bulk path: fetch all revisions in one go
            series = snapshot.findall(revs,
                                      from_value_date,
                                      to_value_date
            )

        if diffmode:
            # turn consecutive full series into successive diffs
            diffs = []
            for (_revdate_a, serie_a), (revdate_b, serie_b) in zip(series, series[1:]):
                if serie_a is None:
                    # when we scan the entirety of the history: there exists no "previous" serie
                    # we therefore consider the first serie as a diff to the "null" serie
                    diffs.append((revdate_b, serie_b))
                else:
                    diffs.append((revdate_b, self.diff(serie_a, serie_b)))
            series = diffs
        else:
            series = [
                (idate, ts if _keep_nans else ts.dropna() )
                 for idate, ts in series
            ]

        return {
            pd.Timestamp(idate).astimezone('UTC'): serie
            for idate, serie in series
        }
252

253
    @tx
    def get_delta(self, cn, seriename, delta,
                  from_value_date=None,
                  to_value_date=None):
        """ compute a series whose value dates are bounded to be
        `delta` time after the insertion dates and where we
        keep the most recent ones
        """
        histo = self.get_history(
            cn, seriename, deltabefore=-delta,
            from_value_date=from_value_date,
            to_value_date=to_value_date,
            _keep_nans=True
        )
        if histo is None:
            # unknown series
            return None

        # for each value date keep the value of the latest insertion
        vimap = {}   # value date -> latest insertion date seen
        vvmap = {}   # value date -> corresponding value
        for idate, series in histo.items():
            # Series.items() replaces .iteritems(), removed in pandas 2.0
            for vdate, value in series.items():
                if vdate not in vimap or vimap[vdate] < idate:
                    vimap[vdate] = idate
                    vvmap[vdate] = value

        ts = pd.Series(vvmap).sort_index().loc[from_value_date:to_value_date]
        ts.name = seriename
        return ts.dropna()
281

282
283
    def exists(self, cn, seriename):
        """Tell whether the series is known to this repository."""
        table = self._get_ts_table(cn, seriename)
        return table is not None
284

285
    def latest_insertion_date(self, cn, seriename):
        """Return the UTC timestamp of the series' most recent revision."""
        cset = self.schema.changeset
        tstable = self._get_ts_table(cn, seriename)
        maxdate = cn.execute(
            select(
                [func.max(cset.c.insertion_date)]
            ).where(
                tstable.c.cset == cset.c.id
            )
        ).scalar()
        return pd.Timestamp(maxdate).astimezone('UTC')
296

297
298
    def insertion_dates(self, cn, seriename,
                        fromdate=None, todate=None):
        """List the (UTC) insertion dates of all revisions, oldest first."""
        cset = self.schema.changeset
        tstable = self._get_ts_table(cn, seriename)
        sql = select(
            [cset.c.insertion_date]
        ).where(
            tstable.c.cset == cset.c.id
        ).order_by(
            cset.c.id
        )

        if fromdate:
            sql = sql.where(cset.c.insertion_date >= fromdate)
        if todate:
            sql = sql.where(cset.c.insertion_date <= todate)

        rows = cn.execute(sql).fetchall()
        return [pd.Timestamp(idate).astimezone('UTC') for idate, in rows]

319
320
321
322
    def last_id(self, cn, seriename):
        """Return the id of the series' latest snapshot chunk."""
        return Snapshot(cn, self, seriename).last_id()

323
324
325
    def changeset_at(self, cn, seriename, revdate, mode='strict'):
        """Return the changeset id of the revision at (or nearest to)
        `revdate`, depending on `mode` (strict/before/after).
        """
        assert mode in ('strict', 'before', 'after')
        cset = self.schema.changeset
        table = self._table_definition_for(cn, seriename)
        sql = select([table.c.cset]).where(
            table.c.cset == cset.c.id
        )
        if mode == 'before':
            sql = sql.where(cset.c.insertion_date <= revdate)
        elif mode == 'after':
            sql = sql.where(cset.c.insertion_date >= revdate)
        else:
            # strict: exact match on the insertion date
            sql = sql.where(cset.c.insertion_date == revdate)
        return cn.execute(sql).scalar()

338
339
340
341
342
343
344
345
346
347
348
    @tx
    def rename(self, cn, oldname, newname):
        """Rename a series (the underlying table names are unchanged)."""
        reg = self.schema.registry
        cn.execute(
            reg.update().where(
                reg.c.seriename == oldname
            ).values(seriename=newname)
        )
        # every name-keyed cache is now stale
        self._resetcaches()

349
    @tx
    def delete(self, cn, seriename):
        """Completely remove a series and its revisions.

        The associated changesets are deleted; unknown series are
        ignored (with a log message).
        """
        if not self.exists(cn, seriename):
            # use the module logger, consistent with the rest of the file,
            # instead of a bare print
            L.info('not deleting unknown series %s %s', seriename, self.namespace)
            return
        # changeset will keep ghost entries
        # we cleanup changeset series, then registry
        # then we drop the two remaining tables
        # cn *must* be a transaction scope
        rid, tablename = cn.execute(
            'select id, table_name from "{}".registry '
            'where seriename = %(seriename)s'.format(self.namespace),
            seriename=seriename
        ).fetchone()
        # drop series tables
        cn.execute(
            'drop table "{}.timeserie"."{}" cascade'.format(self.namespace, tablename)
        )
        cn.execute(
            'drop table "{}.snapshot"."{}" cascade'.format(self.namespace, tablename)
        )
        # cleanup changesets table
        cn.execute('with csets as ('
                   ' select cset from "{ns}".changeset_series '
                   ' where serie = %(rid)s'
                   ') '
                   'delete from "{ns}".changeset as cset using csets '
                   'where cset.id = csets.cset'.format(ns=self.namespace),
                   rid=rid
        )
        cn.execute('delete from "{}".registry '
                   'where id = %(rid)s'.format(self.namespace),
                   rid=rid)
        # -> this will transitively cleanup state changeset_series entries
        self._resetcaches()
        L.info('deleted %s %s', seriename, self.namespace)
385

386
    @tx
    def strip(self, cn, seriename, csid):
        """Erase all revisions of `seriename` from changeset `csid` onward.

        The stripped changesets are kept as ghosts (annotated in their
        metadata) but unlinked from the series, and the stored diffs
        are deleted.
        """
        logs = self.log(cn, fromrev=csid, names=(seriename,))
        assert logs

        # put stripping info in the metadata
        cset = self.schema.changeset
        cset_serie = self.schema.changeset_series
        for log in logs:
            # update changeset.metadata with a trace of the strip
            metadata = self.changeset_metadata(cn, log['rev']) or {}
            metadata['tshistory.info'] = 'got stripped from {}'.format(csid)
            sql = cset.update().where(cset.c.id == log['rev']
            ).values(metadata=metadata)
            cn.execute(sql)
            # delete the changeset_series link for this series
            sql = cset_serie.delete().where(
                cset_serie.c.cset == log['rev']
            ).where(
                cset_serie.c.serie == self._name_to_regid(cn, seriename)
            )
            cn.execute(sql)

        # wipe the diffs
        table = self._table_definition_for(cn, seriename)
        cn.execute(table.delete().where(table.c.cset >= csid))
412

413
    def info(self, cn):
        """Gather global statistics on the current tshistory repository
        """
        stats = {}
        sql = 'select count(*) from "{}".registry'.format(self.namespace)
        stats['series count'] = cn.execute(sql).scalar()
        sql = 'select max(id) from "{}".changeset'.format(self.namespace)
        stats['changeset count'] = cn.execute(sql).scalar()
        sql = ('select distinct seriename from "{}".registry '
               'order by seriename').format(self.namespace)
        stats['serie names'] = [name for name, in cn.execute(sql).fetchall()]
        return stats

426
    def log(self, cn, limit=0, names=None, authors=None,
            stripped=False,
            fromrev=None, torev=None,
            fromdate=None, todate=None):
        """Build a structure showing the history of all the series in the db,
        per changeset, in chronological order.

        limit: keep only the `limit` most recent changesets
        names / authors: restrict to those series names / authors
        stripped: also show changesets no longer linked to any series
        fromrev / torev, fromdate / todate: changeset id / date bounds
        """
        log = []
        cset, cset_series, reg = (
            self.schema.changeset,
            self.schema.changeset_series,
            self.schema.registry
        )

        # newest first so that `limit` keeps the most recent entries;
        # the result is re-sorted chronologically at the end
        sql = select([cset.c.id, cset.c.author, cset.c.insertion_date, cset.c.metadata]
        ).distinct().order_by(desc(cset.c.id))

        if limit:
            sql = sql.limit(limit)
        if names:
            sql = sql.where(reg.c.seriename.in_(names))
        if authors:
            sql = sql.where(cset.c.author.in_(authors))
        if fromrev:
            sql = sql.where(cset.c.id >= fromrev)
        if torev:
            sql = sql.where(cset.c.id <= torev)
        if fromdate:
            sql = sql.where(cset.c.insertion_date >= fromdate)
        if todate:
            sql = sql.where(cset.c.insertion_date <= todate)
        if stripped:
            # outerjoin to show dead things
            sql = sql.select_from(cset.outerjoin(cset_series))
        else:
            sql = sql.where(cset.c.id == cset_series.c.cset
            ).where(cset_series.c.serie == reg.c.id)

        rset = cn.execute(sql)
        for csetid, author, revdate, meta in rset.fetchall():
            log.append({'rev': csetid, 'author': author,
                        'date': pd.Timestamp(revdate, tz='utc'),
                        'meta': meta or {},
                        'names': self._changeset_series(cn, csetid)})

        # chronological order for the caller
        log.sort(key=lambda rev: rev['rev'])
        return log

474
    def interval(self, cn, seriename, notz=False):
        """Return the closed [start, end] value-date interval of a series."""
        tablename = self._serie_to_tablename(cn, seriename)
        if tablename is None:
            raise ValueError(f'no such serie: {seriename}')
        res = cn.execute(
            f'select start, "end" '
            f'from "{self.namespace}.timeserie"."{tablename}" '
            f'order by cset desc limit 1'
        ).fetchone()
        start, end = res.start, res.end
        tzaware = self.metadata(cn, seriename).get('tzaware')
        if tzaware and not notz:
            # stamps are stored naive; localize them for tz-aware series
            start = pd.Timestamp(start, tz='UTC')
            end = pd.Timestamp(end, tz='UTC')
        return pd.Interval(left=start, right=end, closed='both')

487
488
    # /API
    # Helpers
489

490
491
    # creation / update

492
    def _create(self, cn, newts, seriename, author,
                metadata=None, insertion_date=None):
        """Store the very first revision of a series.

        Registers the series, creates its tables, writes the first
        snapshot and changeset.  Returns the stored series, or None
        when there is nothing to store (all-nan input).
        """
        start, end = start_end(newts, notz=False)
        if start is None:
            assert end is None
            # this is just full of nans
            return None
        # chop off unwanted nans
        newts = newts.loc[start:end]
        if len(newts) == 0:
            return None

        # at creation time we take an exclusive lock to avoid
        # a deadlock on created tables against the changeset-series fk
        cn.execute(
            'select pg_advisory_xact_lock({})'.format(self.create_lock_id)
        )
        self._register_serie(cn, seriename, newts)
        snapshot = Snapshot(cn, self, seriename)
        csid = self._newchangeset(cn, author, insertion_date, metadata)
        head = snapshot.create(newts)
        # recompute stamps with default tz handling for storage
        start, end = start_end(newts)
        value = {
            'cset': csid,
            'snapshot': head,
            'start': start,
            'end': end
        }
        table = self._make_ts_table(cn, seriename)
        cn.execute(table.insert().values(value))
        self._finalize_insertion(cn, csid, seriename)
        L.info('first insertion of %s (size=%s) by %s',
               seriename, len(newts), author)
        return newts

527
    def _update(self, cn, table, newts, seriename, author,
                metadata=None, insertion_date=None):
        """Store a new revision as a diff against the current state.

        Returns the stored diff, or None when `newts` brings nothing new.
        """
        self._validate(cn, newts, seriename)
        snapshot = Snapshot(cn, self, seriename)
        # only the changed values over the affected range are kept
        diff = self.diff(snapshot.last(newts.index.min(),
                                       newts.index.max()),
                         newts)
        if not len(diff):
            L.info('no difference in %s by %s (for ts of size %s)',
                   seriename, author, len(newts))
            return

        # compute series start/end stamps
        tsstart, tsend = start_end(newts)
        ival = self.interval(cn, seriename, notz=True)
        start = min(tsstart or ival.left, ival.left)
        end = max(tsend or ival.right, ival.right)

        # use positional .iloc: plain diff[0]/diff[-1] on a datetime
        # index is label-based fallback indexing, deprecated/removed
        # in modern pandas
        if pd.isnull(diff.iloc[0]) or pd.isnull(diff.iloc[-1]):
            # we *might* be shrinking, let's look at the full series
            # and yes, shrinkers have a slow path
            last = snapshot.last()
            patched = self.patch(last, diff).dropna()
            if not len(patched):
                raise ValueError('complete erasure of a series is forbidden')
            if pd.isnull(diff.iloc[0]):
                start = patched.index[0]
            if pd.isnull(diff.iloc[-1]):
                end = patched.index[-1]

        csid = self._newchangeset(cn, author, insertion_date, metadata)
        head = snapshot.update(diff)
        value = {
            'cset': csid,
            'snapshot': head,
            'start': start,
            'end': end
        }
        cn.execute(table.insert().values(value))
        self._finalize_insertion(cn, csid, seriename)

        L.info('inserted diff (size=%s) for ts %s by %s',
               len(diff), seriename, author)
        return diff

572
    # serie table handling
573

574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
    def _make_tablename(self, cn, seriename):
        """ compute the unqualified (no namespace) table name
        from a serie name, to allow arbitrary serie names
        """
        # postgresql identifiers are limited to 63 chars: longer names
        # are replaced by their sha1 hex digest (40 chars)
        if len(seriename) > 63:
            seriename = hashlib.sha1(seriename.encode('utf-8')).hexdigest()

        # a rename can leave the old table name in use while freeing
        # the series name: fall back to a random name on collision
        taken = cn.execute(
            f'select table_name '
            f'from "{self.namespace}".registry '
            f'where table_name = %(seriename)s',
            seriename=seriename
        ).scalar()
        if taken:
            return str(uuid.uuid4())
        return seriename

591
592
593
594
    def _serie_to_tablename(self, cn, seriename):
        """Map a series name to its table name, caching the answer."""
        cached = self.serie_tablename.get(seriename)
        if cached is not None:
            return cached

        reg = self.schema.registry
        tablename = cn.execute(
            select([reg.c.table_name]).where(reg.c.seriename == seriename)
        ).scalar()
        if tablename is None:
            # creation time: nothing registered yet
            return None
        self.serie_tablename[seriename] = tablename
        return tablename

    def _table_definition_for(self, cn, seriename):
        """Build (and memoize in the module-level TABLES dict) the
        sqlalchemy Table object of a series.
        """
        tablename = self._serie_to_tablename(cn, seriename)
        if tablename is None:
            # creation time: pick a fresh table name
            tablename = self._make_tablename(cn, seriename)
        fq_tablename = '{}.timeserie.{}'.format(self.namespace, tablename)
        table = TABLES.get(fq_tablename)
        if table is None:
            TABLES[fq_tablename] = table = Table(
                tablename, self.schema.meta,
                Column('id', Integer, primary_key=True),
                # owning changeset
                Column('cset', Integer,
                       ForeignKey('{}.changeset.id'.format(self.namespace)),
                       nullable=False),
                # value-date interval covered by this revision
                Column('start', TIMESTAMP, nullable=False),
                Column('end', TIMESTAMP, nullable=False),
                # head chunk of the snapshot for this revision
                Column('snapshot', Integer,
                       ForeignKey('{}.snapshot.{}.id'.format(
                           self.namespace,
                           tablename))),
                Index(NONE_NAME, 'cset'),
                Index(NONE_NAME, 'snapshot'),
                Index(NONE_NAME, 'start'),
                Index(NONE_NAME, 'end'),
                schema='{}.timeserie'.format(self.namespace),
                keep_existing=True
            )
        return table
633

Aurélien Campéas's avatar
Aurélien Campéas committed
634
    def _make_ts_table(self, cn, seriename):
        """Create the series table in the database and return it."""
        table = self._table_definition_for(cn, seriename)
        table.create(cn)
        return table

    def _register_serie(self, cn, seriename, ts):
        """Insert the registry row of a new series and cache its id."""
        index = ts.index
        inames = [name for name in index.names if name]
        sql = self.schema.registry.insert().values(
            seriename=seriename,
            table_name=self._make_tablename(cn, seriename),
            metadata={
                'tzaware': tzaware_serie(ts),
                'index_type': index.dtype.name,
                'index_names': inames,
                'index_dtype': index.dtype.str,
                'value_dtype': ts.dtypes.str,
                'value_type': ts.dtypes.name
            }
        )
        regid = cn.execute(sql).inserted_primary_key[0]
        self.registry_map[seriename] = regid
656

657
    def _get_ts_table(self, cn, seriename):
        """Return the table object of a registered series, else None."""
        if self._serie_to_tablename(cn, seriename):
            return self._table_definition_for(cn, seriename)
        return None
661

662
663
    # changeset handling

664
    def _newchangeset(self, cn, author, insertion_date=None, metadata=None):
        """Insert a changeset row and return its id."""
        table = self.schema.changeset
        if insertion_date is not None:
            # caller-provided dates must be timezone-aware
            assert insertion_date.tzinfo is not None
        idate = pd.Timestamp(insertion_date or datetime.utcnow(), tz='UTC')
        sql = table.insert().values(
            author=author,
            metadata=metadata,
            insertion_date=idate
        )
        return cn.execute(sql).inserted_primary_key[0]
674

675
    def _changeset_series(self, cn, csid):
        """List the names of the series touched by changeset `csid`."""
        cset_serie = self.schema.changeset_series
        reg = self.schema.registry
        sql = select(
            [reg.c.seriename]
        ).where(
            cset_serie.c.cset == csid
        ).where(
            cset_serie.c.serie == reg.c.id
        )
        return [row.seriename for row in cn.execute(sql).fetchall()]
687

688
689
690
691
692
693
694
    def _previous_cset(self, cn, seriename, csid):
        """Return the changeset id right before `csid` for this series."""
        tablename = self._serie_to_tablename(cn, seriename)
        sql = (f'select cset from "{self.namespace}.timeserie"."{tablename}" '
               f'where cset < %(csid)s '
               f'order by cset desc limit 1')
        return cn.execute(sql, csid=csid).scalar()

695
696
    # insertion handling

697
    def _validate(self, cn, ts, seriename):
        """Check that `ts` is type-compatible with the stored series."""
        if ts.isnull().all():
            # an all-nan series is an erasure request: nothing to check
            return
        tstype = ts.dtype
        meta = self.metadata(cn, seriename)
        if tstype != meta['value_type']:
            m = 'Type error when inserting {}, new type is {}, type in base is {}'.format(
                seriename, tstype, meta['value_type'])
            raise Exception(m)
        if ts.index.dtype.name != meta['index_type']:
            raise Exception('Incompatible index types')
709

710
711
    def _name_to_regid(self, cn, seriename):
        """Resolve a series name to its registry id, caching the answer."""
        regid = self.registry_map.get(seriename)
        if regid is not None:
            return regid

        registry = self.schema.registry
        regid = self.registry_map[seriename] = cn.execute(
            select([registry.c.id]).where(registry.c.seriename == seriename)
        ).scalar()
        return regid

720
    def _finalize_insertion(self, cn, csid, seriename):
        """Link changeset `csid` to the series in changeset_series."""
        table = self.schema.changeset_series
        cn.execute(
            table.insert().values(
                cset=csid,
                serie=self._name_to_regid(cn, seriename)
            )
        )
727

728
    def _resetcaches(self):
        """Drop every process-level cache (name maps + table definitions)."""
        with self.cachelock:
            self.metadatacache.clear()
            self.registry_map.clear()
            self.serie_tablename.clear()
            TABLES.clear()
            SNAPTABLES.clear()