1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use crate::api::places_api::{ConnectionType, GLOBAL_STATE_META_KEY};
use crate::db::PlacesDb;
use crate::error::*;
use crate::storage::history::{delete_everything, history_sync::reset};
use rusqlite::types::{FromSql, ToSql};
use rusqlite::Connection;
use sql_support::SqlInterruptScope;
use std::ops::Deref;
use sync15::telemetry;
use sync15::{
    extract_v1_state, CollSyncIds, CollectionRequest, IncomingChangeset, OutgoingChangeset,
    ServerTimestamp, Store, StoreSyncAssociation,
};
use sync_guid::Guid;

use super::plan::{apply_plan, finish_plan};
use super::MAX_INCOMING_PLACES;

/// Meta-table key holding the server timestamp of the last successful sync,
/// stored as an `i64` number of milliseconds.
pub const LAST_SYNC_META_KEY: &str = "history_last_sync_time";
// Note that all engines in this crate should use a *different* meta key
// for the global sync ID, because engines are reset individually.
/// Meta-table key for the global sync ID this engine last saw.
pub const GLOBAL_SYNCID_META_KEY: &str = "history_global_sync_id";
/// Meta-table key for the history collection's own sync ID.
pub const COLLECTION_SYNCID_META_KEY: &str = "history_sync_id";

// A HistoryStore is short-lived and constructed each sync by something which
// owns the connection and ClientInfo.
pub struct HistoryStore<'a> {
    /// Borrowed database handle; `new()` asserts this is the sync connection.
    pub db: &'a PlacesDb,
    /// Scope used to check for interruption while applying incoming records.
    interruptee: &'a SqlInterruptScope,
}

impl<'a> HistoryStore<'a> {
    pub fn new(db: &'a PlacesDb, interruptee: &'a SqlInterruptScope) -> Self {
        assert_eq!(db.conn_type(), ConnectionType::Sync);
        Self { db, interruptee }
    }

    fn put_meta(&self, key: &str, value: &dyn ToSql) -> Result<()> {
        crate::storage::put_meta(self.db, key, value)
    }

    fn get_meta<T: FromSql>(&self, key: &str) -> Result<Option<T>> {
        crate::storage::get_meta(self.db, key)
    }

    fn do_apply_incoming(
        &self,
        inbound: IncomingChangeset,
        telem: &mut telemetry::Engine,
    ) -> Result<OutgoingChangeset> {
        let timestamp = inbound.timestamp;
        let outgoing = {
            let mut incoming_telemetry = telemetry::EngineIncoming::new();
            let result = apply_plan(&self.db, inbound, &mut incoming_telemetry, self.interruptee);
            telem.incoming(incoming_telemetry);
            result
        }?;
        // write the timestamp now, so if we are interrupted creating outgoing
        // changesets we don't need to re-reconcile what we just did.
        self.put_meta(LAST_SYNC_META_KEY, &(timestamp.as_millis() as i64))?;
        Ok(outgoing)
    }

    fn do_sync_finished(
        &self,
        new_timestamp: ServerTimestamp,
        records_synced: Vec<Guid>,
    ) -> Result<()> {
        log::info!(
            "sync completed after uploading {} records",
            records_synced.len()
        );
        finish_plan(&self.db)?;

        // write timestamp to reflect what we just wrote.
        self.put_meta(LAST_SYNC_META_KEY, &(new_timestamp.as_millis() as i64))?;

        self.db.pragma_update(None, "wal_checkpoint", &"PASSIVE")?;

        Ok(())
    }

    /// A utility we can kill by the end of 2019 ;) Or even mid-2019?
    /// Note that this has no `self` - it just takes a connection. This is to
    /// ease the migration process, because this needs to be executed before
    /// bookmarks sync, otherwise the shared, persisted global state may be
    /// written by bookmarks before we've had a chance to migrate `declined`
    /// over.
    pub fn migrate_v1_global_state(db: &PlacesDb) -> Result<()> {
        if let Some(old_state) = crate::storage::get_meta(db, "history_global_state")? {
            log::info!("there's old global state - migrating");
            let tx = db.begin_transaction()?;
            let (new_sync_ids, new_global_state) = extract_v1_state(old_state, "history");
            if let Some(sync_ids) = new_sync_ids {
                crate::storage::put_meta(db, GLOBAL_SYNCID_META_KEY, &sync_ids.global)?;
                crate::storage::put_meta(db, COLLECTION_SYNCID_META_KEY, &sync_ids.coll)?;
                log::info!("migrated the sync IDs");
            }
            if let Some(new_global_state) = new_global_state {
                // The global state is truly global, but both "history" and "places"
                // are going to write it - which is why it's important this
                // function is run before bookmarks is synced.
                crate::storage::put_meta(db, GLOBAL_STATE_META_KEY, &new_global_state)?;
                log::info!("migrated the global state");
            }
            crate::storage::delete_meta(db, "history_global_state")?;
            tx.commit()?;
        }
        Ok(())
    }
}

// Let a HistoryStore be used anywhere a `&Connection` is expected, via the
// PlacesDb's own deref chain.
impl<'a> Deref for HistoryStore<'a> {
    type Target = Connection;
    #[inline]
    fn deref(&self) -> &Connection {
        // `&&PlacesDb` coerces down to `&Connection` through PlacesDb's Deref.
        &self.db
    }
}

impl<'a> Store for HistoryStore<'a> {
    /// The name of the server collection this store syncs.
    fn collection_name(&self) -> std::borrow::Cow<'static, str> {
        std::borrow::Cow::Borrowed("history")
    }

    /// Applies the single incoming changeset; panics if given any other
    /// number, since `get_collection_requests` only ever asks for one.
    fn apply_incoming(
        &self,
        inbound: Vec<IncomingChangeset>,
        telem: &mut telemetry::Engine,
    ) -> anyhow::Result<OutgoingChangeset> {
        assert_eq!(inbound.len(), 1, "history only requests one item");
        let changeset = inbound
            .into_iter()
            .next()
            .expect("just asserted len is 1");
        Ok(self.do_apply_incoming(changeset, telem)?)
    }

    fn sync_finished(
        &self,
        new_timestamp: ServerTimestamp,
        records_synced: Vec<Guid>,
    ) -> anyhow::Result<()> {
        Ok(self.do_sync_finished(new_timestamp, records_synced)?)
    }

    /// Requests the full history collection since our last sync, or nothing
    /// at all when the server hasn't changed since then.
    fn get_collection_requests(
        &self,
        server_timestamp: ServerTimestamp,
    ) -> anyhow::Result<Vec<CollectionRequest>> {
        let last_sync = self
            .get_meta::<i64>(LAST_SYNC_META_KEY)?
            .unwrap_or_default();
        let since = ServerTimestamp(last_sync);
        if since == server_timestamp {
            // Nothing new on the server - skip the fetch entirely.
            return Ok(vec![]);
        }
        let request = CollectionRequest::new("history")
            .full()
            .newer_than(since)
            .limit(MAX_INCOMING_PLACES);
        Ok(vec![request])
    }

    /// We're "connected" only when both sync IDs are stored locally.
    fn get_sync_assoc(&self) -> anyhow::Result<StoreSyncAssociation> {
        let ids = (
            self.get_meta(GLOBAL_SYNCID_META_KEY)?,
            self.get_meta(COLLECTION_SYNCID_META_KEY)?,
        );
        Ok(match ids {
            (Some(global), Some(coll)) => {
                StoreSyncAssociation::Connected(CollSyncIds { global, coll })
            }
            _ => StoreSyncAssociation::Disconnected,
        })
    }

    fn reset(&self, assoc: &StoreSyncAssociation) -> anyhow::Result<()> {
        Ok(reset(&self.db, assoc)?)
    }

    fn wipe(&self) -> anyhow::Result<()> {
        Ok(delete_everything(&self.db)?)
    }
}