DataStore

DataStore is Jetpack's data-persistence solution intended to replace SharedPreferences.

Advantages of DataStore:

  • Built on coroutines and Flow, so reads and writes run asynchronously and are thread-safe
  • The success or failure of an operation can be observed
  • Existing SharedPreferences data can be migrated to DataStore automatically, keeping the data consistent without corruption

If you really need a synchronous read, you can use runBlocking() to read data from DataStore.
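
For example, a minimal sketch of a blocking read. The dataStore instance and the intPK key are illustrative names defined in the construction sketch below, and the import paths assume the alpha-era artifacts:

    import androidx.datastore.DataStore
    import androidx.datastore.preferences.Preferences
    import kotlinx.coroutines.flow.first
    import kotlinx.coroutines.flow.map
    import kotlinx.coroutines.runBlocking

    // Blocks the calling thread until the first value has been read from disk,
    // so avoid calling this on the main thread.
    fun readIntBlocking(dataStore: DataStore<Preferences>): Int = runBlocking {
        dataStore.data.map { prefs -> prefs[intPK] ?: 0 }.first()
    }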

Constructing a DataStore:

createDataStore():

fun Context.createDataStore(
    name: String,
    corruptionHandler: ReplaceFileCorruptionHandler<Preferences>? = null,
    migrations: List<DataMigration<Preferences>> = listOf(),
    scope: CoroutineScope = CoroutineScope(Dispatchers.IO + SupervisorJob())
): DataStore<Preferences> =
    PreferenceDataStoreFactory.create(
        produceFile = {
            File(this.filesDir, "datastore/$name.preferences_pb")
        },
        corruptionHandler = corruptionHandler,
        migrations = migrations,
        scope = scope
    )
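
For orientation, a hedged usage sketch of this construction path, assuming the alpha-era androidx.datastore.preferences artifacts; the "settings" name and the intPK key (reused by the put/get examples later) are illustrative:

    import android.content.Context
    import androidx.datastore.DataStore
    import androidx.datastore.preferences.Preferences
    import androidx.datastore.preferences.createDataStore
    import androidx.datastore.preferences.preferencesKey

    // Typed preference key; reused by the put/get examples below.
    val intPK = preferencesKey<Int>("int_key")

    class SettingsRepository(context: Context) {
        // Creates (or opens) files/datastore/settings.preferences_pb in the app's files dir.
        // A DataStore for a given file should only be created once per process.
        val dataStore: DataStore<Preferences> = context.createDataStore(name = "settings")
    }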

PreferenceDataStoreFactory.create()

    fun create(
        produceFile: () -> File,
        corruptionHandler: ReplaceFileCorruptionHandler<Preferences>? = null,
        migrations: List<DataMigration<Preferences>> = listOf(),
        scope: CoroutineScope = CoroutineScope(Dispatchers.IO + SupervisorJob())
    ): DataStore<Preferences> {
        val delegate = DataStoreFactory.create(
            produceFile = {
                val file = produceFile()
                check(file.extension == PreferencesSerializer.fileExtension) {
                    "File extension for file: $file does not match required extension for" +
                            " Preferences file: ${PreferencesSerializer.fileExtension}"
                }
                file
            },
            serializer = PreferencesSerializer,
            corruptionHandler = corruptionHandler,
            migrations = migrations,
            scope = scope
        )
        return PreferenceDataStore(delegate)
    }

DataStoreFactory.create()

    fun <T> create(
        produceFile: () -> File,
        serializer: Serializer<T>,
        corruptionHandler: ReplaceFileCorruptionHandler<T>? = null,
        migrations: List<DataMigration<T>> = listOf(),
        scope: CoroutineScope = CoroutineScope(Dispatchers.IO + SupervisorJob())
    ): DataStore<T> =
        SingleProcessDataStore(
            produceFile = produceFile,
            serializer = serializer,
            corruptionHandler = corruptionHandler ?: NoOpCorruptionHandler(),
            initTasksList = listOf(DataMigrationInitializer.getInitializer(migrations)),
            scope = scope
        )

Put

       dataStore.edit {
           it[intPK] = 2
       }
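
edit() is a suspend function, so it has to be called from a coroutine. A minimal sketch (the saveCounter wrapper and the calling scope are illustrative, and the import assumes the alpha package layout):

    import androidx.datastore.preferences.edit   // alpha-era location of the edit() extension

    // Illustrative wrapper: the transform block is applied as one atomic transaction and
    // edit() returns the resulting Preferences snapshot once it has been persisted to disk.
    suspend fun saveCounter(dataStore: DataStore<Preferences>, value: Int) {
        dataStore.edit { prefs ->
            prefs[intPK] = value
        }
    }

    // e.g. from a ViewModel: viewModelScope.launch { saveCounter(dataStore, 2) }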

suspend fun DataStore<Preferences>.edit(
    transform: suspend (MutablePreferences) -> Unit
): Preferences {
    return this.updateData {
        // It's safe to return MutablePreferences since we make a defensive copy in
        // PreferencesDataStore.updateData()
        it.toMutablePreferences().apply { transform(this) }
    }
}

Writing through dataStore.edit() ultimately goes through updateData():

SingleProcessDataStore.updateData()

    override suspend fun updateData(transform: suspend (t: T) -> T): T {
        val ack = CompletableDeferred<T>()
        val dataChannel = downstreamChannel()
        val updateMsg = Message.Update(transform, ack, dataChannel)

        actor.send(updateMsg)

        // If no read has succeeded yet, we need to wait on the result of the next read so we can
        // bubble exceptions up to the caller. Read exceptions are not bubbled up through ack.
        if (dataChannel.valueOrNull == null) {
            dataChannel.asFlow().first()
        }

        // Wait with same scope as the actor, so we're not waiting on a cancelled actor.
        return withContext(scope.coroutineContext) { ack.await() }
    }
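
updateData() is a request/response over a channel: it packages the transform plus a CompletableDeferred into a message, sends it to the actor, and suspends until the actor completes the deferred. A standalone sketch of that pattern in plain kotlinx.coroutines (not DataStore code; all names here are illustrative):

    import kotlinx.coroutines.CompletableDeferred
    import kotlinx.coroutines.CoroutineScope
    import kotlinx.coroutines.ObsoleteCoroutinesApi
    import kotlinx.coroutines.channels.Channel
    import kotlinx.coroutines.channels.SendChannel
    import kotlinx.coroutines.channels.actor
    import kotlinx.coroutines.completeWith

    // One message type: carries the work plus a deferred used to report the result.
    class UpdateMsg(val transform: (Int) -> Int, val ack: CompletableDeferred<Int>)

    // All state lives inside the actor; callers never touch it directly.
    @OptIn(ObsoleteCoroutinesApi::class)
    fun CoroutineScope.counterActor(): SendChannel<UpdateMsg> = actor(capacity = Channel.UNLIMITED) {
        var state = 0
        for (msg in channel) {
            msg.ack.completeWith(runCatching {
                state = msg.transform(state)
                state
            })
        }
    }

    // Mirrors updateData(): send the message, then suspend until the actor acknowledges it.
    suspend fun SendChannel<UpdateMsg>.update(transform: (Int) -> Int): Int {
        val ack = CompletableDeferred<Int>()
        send(UpdateMsg(transform, ack))
        return ack.await()
    }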

All the concrete work is carried out by the actor:

    /**
     * Consumes messages. All state changes should happen within actor.
     */
    private val actor: SendChannel<Message<T>> = scope.actor(
        capacity = UNLIMITED
    ) {
        try {
            messageConsumer@ for (msg in channel) {
                if (msg.dataChannel.isClosedForSend) {
                    // The message was sent with an old, now closed, dataChannel. This means that
                    // our read failed.
                    continue@messageConsumer
                }

                try {
                    // 1. Read all the data from the file into the dataChannel
                    readAndInitOnce(msg.dataChannel)
                } catch (ex: Throwable) {
                    resetDataChannel(ex)
                    continue@messageConsumer
                }

                // We have successfully read data and sent it to downstreamChannel.
                // 2. Apply the transform and write the result back to the file
                if (msg is Message.Update) {
                    msg.ack.completeWith(
                        runCatching {
                            transformAndWrite(msg.transform, downstreamChannel())
                        }
                    )
                }
            }
        } finally {
            // The scope has been cancelled. Cancel downstream in case there are any collectors
            // still active.
            downstreamChannel().cancel()
        }
    }

There are two kinds of Message: Update and Read. Handling either kind first runs readAndInitOnce() to read all data from the file into the dataChannel; only when handling an Update message does it go on to run transformAndWrite() to write the data back to the file.
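
For reference, the Message hierarchy consumed by the actor looks roughly like this (reconstructed from the usages above in this version of SingleProcessDataStore; treat it as a sketch, not a verbatim quote):

    private sealed class Message<T> {
        abstract val dataChannel: ConflatedBroadcastChannel<T>

        // Read: just make sure dataChannel has been populated from disk.
        class Read<T>(
            override val dataChannel: ConflatedBroadcastChannel<T>
        ) : Message<T>()

        // Update: run the transform, persist the result, and complete ack with it.
        class Update<T>(
            val transform: suspend (t: T) -> T,
            val ack: CompletableDeferred<T>,
            override val dataChannel: ConflatedBroadcastChannel<T>
        ) : Message<T>()
    }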

1. readAndInitOnce()
    private suspend fun readAndInitOnce(dataChannel: ConflatedBroadcastChannel<T>) {
        if (dataChannel.valueOrNull != null) {
            // If we already have cached data, we don't try to read it again.
            return
        }

        val updateLock = Mutex()
        var initData = readDataOrHandleCorruption()

        var initializationComplete: Boolean = false

        // TODO(b/151635324): Consider using Context Element to throw an error on re-entrance.
        val api = object : InitializerApi<T> {
            override suspend fun updateData(transform: suspend (t: T) -> T): T {
                return updateLock.withLock() {
                    if (initializationComplete) {
                        throw IllegalStateException(
                            "InitializerApi.updateData should not be " +
                                    "called after initialization is complete."
                        )
                    }

                    val newData = transform(initData)
                    if (newData != initData) {
                        writeData(newData)
                        initData = newData
                    }

                    initData
                }
            }
        }

        initTasks?.forEach { it(api) }
        initTasks = null // Init tasks have run successfully, we don't need them anymore.
        updateLock.withLock {
            initializationComplete = true
        }

        dataChannel.offer(initData)
    }
2. transformAndWrite()
    private suspend fun transformAndWrite(
        transform: suspend (t: T) -> T,
        /**
         * This is the channel that contains the data that will be used for the transformation.
         * It *must* already have a value -- otherwise this will throw IllegalStateException.
         * Once the transformation is completed and data is durably persisted to disk, and the new
         * value will be offered to this channel.
         */
        updateDataChannel: ConflatedBroadcastChannel<T>
    ): T {
        val curData = updateDataChannel.value
        val newData = transform(curData)
        return if (curData == newData) {
            curData
        } else {
            writeData(newData)
            updateDataChannel.offer(newData)
            newData
        }
    }

    /**
     * Internal only to prevent creation of synthetic accessor function. Do not call this from
     * outside this class.
     */
    internal fun writeData(newData: T) {
        file.createParentDirectories()

        val scratchFile = File(file.absolutePath + SCRATCH_SUFFIX)
        try {
            FileOutputStream(scratchFile).use { stream ->
                serializer.writeTo(newData, stream)
                stream.fd.sync()
                // TODO(b/151635324): fsync the directory, otherwise a badly timed crash could
                //  result in reverting to a previous state.
            }
            if (!scratchFile.renameTo(file)) {
                throw IOException("$scratchFile could not be renamed to $file")
            }
        } catch (ex: IOException) {
            if (scratchFile.exists()) {
                scratchFile.delete()
            }
            throw ex
        }
    }
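
The initTasks executed inside readAndInitOnce() are what run the DataMigrations, including the SharedPreferences migration mentioned at the beginning. A hedged usage sketch (the "legacy_prefs" name is illustrative, and the exact package of SharedPreferencesMigration shifted between alpha releases):

    // Migrates the existing SharedPreferences file "legacy_prefs" the first time the
    // DataStore is read; afterwards the migration becomes a no-op.
    fun createMigratedStore(context: Context): DataStore<Preferences> =
        context.createDataStore(
            name = "settings",
            migrations = listOf(SharedPreferencesMigration(context, "legacy_prefs"))
        )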

Flow

https://blog.csdn.net/vitaviva/article/details/104103958?utm_medium=distribute.pc_relevant.none-task-blog-baidujs_title-0&spm=1001.2101.3001.4242

https://blog.csdn.net/weixin_40888127/article/details/108886652

https://blog.csdn.net/u013064109/article/details/80588034
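
For context before the Get section, a minimal cold-Flow sketch: the builder block runs again for every collector, which is the same mechanism DataStore.data relies on below.

    import kotlinx.coroutines.flow.*
    import kotlinx.coroutines.runBlocking

    // A cold flow: nothing runs until someone collects, and each collector restarts the block.
    val numbers: Flow<Int> = flow {
        emit(1)
        emit(2)
    }

    fun main() = runBlocking {
        numbers.collect { println(it) }   // prints 1 then 2
    }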

Get

Because DataStore is built on coroutine Flow, it does not offer the kind of direct, synchronous read that SharedPreferences does. Reads are reactive instead: every successful write causes the collect block of the Flow to run again with the new value, which is where you can cache the latest data. SingleProcessDataStore exposes this through a data member of type Flow<T>:

       var flow = dataStore.data.map {
           it[intPK] ?: 0
       }
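
To actually receive updates you collect this Flow; a minimal sketch (the coroutine scope, cachedInt, and observeInt are illustrative names):

    import kotlinx.coroutines.CoroutineScope
    import kotlinx.coroutines.flow.*
    import kotlinx.coroutines.launch

    // Reactive read: the collect block runs again after every successful write,
    // so cachedInt always holds the latest persisted value.
    var cachedInt = 0

    fun observeInt(scope: CoroutineScope, dataStore: DataStore<Preferences>) {
        scope.launch {
            dataStore.data
                .map { prefs -> prefs[intPK] ?: 0 }
                .collect { value -> cachedInt = value }
        }
    }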

SingleProcessDataStore.data

    override val data: Flow<T> = flow {
        val curChannel = downstreamChannel()
        actor.offer(Message.Read(curChannel))
        emitAll(curChannel.asFlow())
    }

Here a Read message is sent through the actor, which triggers the readAndInitOnce() call in the actor initialization shown above.
