Hyperledger-Fabric Source Code Analysis (ledger-blockstore)

Hyperledger-Fabric Source Code Analysis (Underlying Storage: BlockStore)

This article dissects the blockstore, the most important piece of Fabric's storage layer, to see what kind of implementation it takes to host the ledger data.

blockfileMgr

All of the capabilities that BlockStore exposes to the outside are delegated to blockfileMgr, so let's look at what its construction does.
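Before diving into the constructor, for orientation, the blkstorage.BlockStore interface that blockfileMgr ultimately serves looks roughly like the following in the 1.x code base; treat this method set as indicative (quoted from memory), not as the authoritative definition.

// Types such as common.Block, common.BlockchainInfo, common.Envelope,
// peer.TxValidationCode and ledger.ResultsIterator come from the Fabric
// protos and core/ledger packages.
type BlockStore interface {
   AddBlock(block *common.Block) error
   GetBlockchainInfo() (*common.BlockchainInfo, error)
   RetrieveBlocks(startNum uint64) (ledger.ResultsIterator, error)
   RetrieveBlockByHash(blockHash []byte) (*common.Block, error)
   RetrieveBlockByNumber(blockNum uint64) (*common.Block, error)
   RetrieveTxByID(txID string) (*common.Envelope, error)
   RetrieveTxByBlockNumTranNum(blockNum uint64, tranNum uint64) (*common.Envelope, error)
   RetrieveBlockByTxID(txID string) (*common.Block, error)
   RetrieveTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error)
   Shutdown()
}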

newBlockfileMgr

func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr {
   logger.Debugf("newBlockfileMgr() initializing file-based block storage for ledger: %s ", id)
   //Determine the root directory for the blockfile storage, if it does not exist create it
   rootDir := conf.getLedgerBlockDir(id)
   _, err := util.CreateDirIfMissing(rootDir)
   if err != nil {
      panic(fmt.Sprintf("Error creating block storage root dir [%s]: %s", rootDir, err))
   }
   // Instantiate the manager, i.e. blockFileMgr structure
   mgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore}

   // cp = checkpointInfo, retrieve from the database the file suffix or number of where blocks were stored.
   // It also retrieves the current size of that file and the last block number that was written to that file.
   // At init checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]
   cpInfo, err := mgr.loadCurrentInfo()
   if err != nil {
      panic(fmt.Sprintf("Could not get block file info for current block file from db: %s", err))
   }
   if cpInfo == nil {
      logger.Info(`Getting block information from block storage`)
      if cpInfo, err = constructCheckpointInfoFromBlockFiles(rootDir); err != nil {
         panic(fmt.Sprintf("Could not build checkpoint info from block files: %s", err))
      }
      logger.Debugf("Info constructed by scanning the blocks dir = %s", spew.Sdump(cpInfo))
   } else {
      logger.Debug(`Synching block information from block storage (if needed)`)
      syncCPInfoFromFS(rootDir, cpInfo)
   }
   err = mgr.saveCurrentInfo(cpInfo, true)
   if err != nil {
      panic(fmt.Sprintf("Could not save next block file info to db: %s", err))
   }

   //Open a writer to the file identified by the number and truncate it to only contain the latest block
   // that was completely saved (file system, index, cpinfo, etc)
   currentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum))
   if err != nil {
      panic(fmt.Sprintf("Could not open writer to current file: %s", err))
   }
   //Truncate the file to remove excess past last block
   err = currentFileWriter.truncateFile(cpInfo.latestFileChunksize)
   if err != nil {
      panic(fmt.Sprintf("Could not truncate current file to known size in db: %s", err))
   }

   // Create a new KeyValue store database handler for the blocks index in the keyvalue database
   if mgr.index, err = newBlockIndex(indexConfig, indexStore); err != nil {
      panic(fmt.Sprintf("error in block index: %s", err))
   }

   // Update the manager with the checkpoint info and the file writer
   mgr.cpInfo = cpInfo
   mgr.currentFileWriter = currentFileWriter
   // Create a checkpoint condition (event) variable, for the  goroutine waiting for
   // or announcing the occurrence of an event.
   mgr.cpInfoCond = sync.NewCond(&sync.Mutex{})

   // init BlockchainInfo for external API's
   bcInfo := &common.BlockchainInfo{
      Height:            0,
      CurrentBlockHash:  nil,
      PreviousBlockHash: nil}

   if !cpInfo.isChainEmpty {
      //If start up is a restart of an existing storage, sync the index from block storage and update BlockchainInfo for external API's
      mgr.syncIndex()
      lastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber)
      if err != nil {
         panic(fmt.Sprintf("Could not retrieve header of the last block form file: %s", err))
      }
      lastBlockHash := lastBlockHeader.Hash()
      previousBlockHash := lastBlockHeader.PreviousHash
      bcInfo = &common.BlockchainInfo{
         Height:            cpInfo.lastBlockNumber + 1,
         CurrentBlockHash:  lastBlockHash,
         PreviousBlockHash: previousBlockHash}
   }
   mgr.bcInfo.Store(bcInfo)
   return mgr
}

The main things happening here:

  • Assemble the checkpoint: it is either loaded from the db, rebuilt by scanning the ledger block files, or created fresh, as in the code above. It essentially records where blocks live in the underlying storage files (see the checkpointInfo struct and the sketch just below).
  • Assemble the blockchaininfo, which records the hash of the current block, the hash of the previous block, and the height of the whole blockchain.
  • Create a writer positioned right after the latest fully saved block, so writing continues from there.
  • Build indexes over the blocks along several dimensions and write them into the db.
type checkpointInfo struct {
   latestFileChunkSuffixNum int    // suffix number of the block file that holds the latest block
   latestFileChunksize      int    // offset in that file where the next block will start
   isChainEmpty             bool   // whether the ledger is still empty
   lastBlockNumber          uint64 // number of the latest block
}
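As a rough illustration of how these fields map onto the on-disk layout, here is a minimal, self-contained sketch. The "blockfile_%06d" naming mirrors what deriveBlockfilePath produces; checkpointSketch, currentBlockfilePath and the example path are illustrative helpers, not Fabric code.

package main

import (
   "fmt"
   "path/filepath"
)

// checkpointSketch mirrors the checkpointInfo fields described above.
type checkpointSketch struct {
   latestFileChunkSuffixNum int
   latestFileChunksize      int
   isChainEmpty             bool
   lastBlockNumber          uint64
}

// currentBlockfilePath shows how the suffix number selects the block file,
// assuming the "blockfile_%06d" naming produced by deriveBlockfilePath.
func currentBlockfilePath(rootDir string, cp checkpointSketch) string {
   return filepath.Join(rootDir, fmt.Sprintf("blockfile_%06d", cp.latestFileChunkSuffixNum))
}

func main() {
   cp := checkpointSketch{latestFileChunkSuffixNum: 2, latestFileChunksize: 1024, lastBlockNumber: 37}
   // The next block will be appended to this file at offset latestFileChunksize.
   fmt.Printf("%s @ offset %d (last block number: %d)\n",
      currentBlockfilePath("/tmp/ledgersData/chains/mychannel", cp),
      cp.latestFileChunksize, cp.lastBlockNumber)
}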

addBlock

func (mgr *blockfileMgr) addBlock(block *common.Block) error {
   bcInfo := mgr.getBlockchainInfo()
   if block.Header.Number != bcInfo.Height {
      return errors.Errorf(
         "block number should have been %d but was %d",
         mgr.getBlockchainInfo().Height, block.Header.Number,
      )
   }

   // Add the previous hash check - Though, not essential but may not be a bad idea to
   // verify the field `block.Header.PreviousHash` present in the block.
   // This check is a simple bytes comparison and hence does not cause any observable performance penalty
   // and may help in detecting a rare scenario if there is any bug in the ordering service.
   if !bytes.Equal(block.Header.PreviousHash, bcInfo.CurrentBlockHash) {
      return errors.Errorf(
         "unexpected Previous block hash. Expected PreviousHash = [%x], PreviousHash referred in the latest block= [%x]",
         bcInfo.CurrentBlockHash, block.Header.PreviousHash,
      )
   }
   blockBytes, info, err := serializeBlock(block)
   if err != nil {
      return errors.WithMessage(err, "error serializing block")
   }
   blockHash := block.Header.Hash()
   //Get the location / offset where each transaction starts in the block and where the block ends
   txOffsets := info.txOffsets
   currentOffset := mgr.cpInfo.latestFileChunksize

   blockBytesLen := len(blockBytes)
   blockBytesEncodedLen := proto.EncodeVarint(uint64(blockBytesLen))
   totalBytesToAppend := blockBytesLen + len(blockBytesEncodedLen)

   //Determine if we need to start a new file since the size of this block
   //exceeds the amount of space left in the current file
   if currentOffset+totalBytesToAppend > mgr.conf.maxBlockfileSize {
      mgr.moveToNextFile()
      currentOffset = 0
   }
   //append blockBytesEncodedLen to the file
   err = mgr.currentFileWriter.append(blockBytesEncodedLen, false)
   if err == nil {
      //append the actual block bytes to the file
      err = mgr.currentFileWriter.append(blockBytes, true)
   }
   if err != nil {
      truncateErr := mgr.currentFileWriter.truncateFile(mgr.cpInfo.latestFileChunksize)
      if truncateErr != nil {
         panic(fmt.Sprintf("Could not truncate current file to known size after an error during block append: %s", err))
      }
      return errors.WithMessage(err, "error appending block to file")
   }

   //Update the checkpoint info with the results of adding the new block
   currentCPInfo := mgr.cpInfo
   newCPInfo := &checkpointInfo{
      latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum,
      latestFileChunksize:      currentCPInfo.latestFileChunksize + totalBytesToAppend,
      isChainEmpty:             false,
      lastBlockNumber:          block.Header.Number}
   //save the checkpoint information in the database
   if err = mgr.saveCurrentInfo(newCPInfo, false); err != nil {
      truncateErr := mgr.currentFileWriter.truncateFile(currentCPInfo.latestFileChunksize)
      if truncateErr != nil {
         panic(fmt.Sprintf("Error in truncating current file to known size after an error in saving checkpoint info: %s", err))
      }
      return errors.WithMessage(err, "error saving current file info to db")
   }

   //Index block file location pointer updated with file suffix and offset for the new block
   blockFLP := &fileLocPointer{fileSuffixNum: newCPInfo.latestFileChunkSuffixNum}
   blockFLP.offset = currentOffset
   // shift the txoffset because we prepend length of bytes before block bytes
   for _, txOffset := range txOffsets {
      txOffset.loc.offset += len(blockBytesEncodedLen)
   }
   //save the index in the database
   if err = mgr.index.indexBlock(&blockIdxInfo{
      blockNum: block.Header.Number, blockHash: blockHash,
      flp: blockFLP, txOffsets: txOffsets, metadata: block.Metadata}); err != nil {
      return err
   }

   //update the checkpoint info (for storage) and the blockchain info (for APIs) in the manager
   mgr.updateCheckpoint(newCPInfo)
   mgr.updateBlockchainInfo(blockHash, block)
   return nil
}
  • Here blockchaininfo is put to use right away. The two checks at the top simply guarantee that blocks are contiguous: the new block's number must equal the current height, and its PreviousHash must match the hash of the latest block.

  • checkpointInfo comes into play again, telling us how far into the last block file we have already written.

  • Check whether the current file offset plus the size of this block would exceed the per-file size limit, in other words whether the last block file can still hold it. If not, roll over to the next file and start writing from the beginning; note that moveToNextFile updates the checkpoint to offset 0 of the next file.

  • Next the writer actually appends the block. If anything fails mid-write, that is serious, so the write is rolled back by truncating the file back to the known size. If the append succeeds, the checkpoint is updated and persisted; it is so central that even a successfully written block is rolled back if saving the checkpoint to the db fails.

  • Shift the offset of every tx inside the block: txOffset.loc.offset += len(blockBytesEncodedLen). The trick is that txOffsets are recorded relative to blockBytes during serialization, while in the block file each record is the length prefix followed by blockBytes, so each tx offset must be shifted by the length of that prefix to be correct (see the framing sketch after this list).

  • With a new block in place, the index naturally needs updating, and so does blockchaininfo (the index entries are sketched after this list as well). Curiously, checkpointInfo gets updated once more here; remember, it was already persisted to the db right after the file append. This second update refreshes the in-memory copy and broadcasts on the condition variable:

    func (mgr *blockfileMgr) updateCheckpoint(cpInfo *checkpointInfo) {
       mgr.cpInfoCond.L.Lock()
       defer mgr.cpInfoCond.L.Unlock()
       mgr.cpInfo = cpInfo
       logger.Debugf("Broadcasting about update checkpointInfo: %s", cpInfo)
       mgr.cpInfoCond.Broadcast()
    }
    
    func (itr *blocksItr) waitForBlock(blockNum uint64) uint64 {
        itr.mgr.cpInfoCond.L.Lock()
        defer itr.mgr.cpInfoCond.L.Unlock()
        for itr.mgr.cpInfo.lastBlockNumber < blockNum && !itr.shouldClose() {
            logger.Debugf("Going to wait for newer blocks. maxAvailaBlockNumber=[%d], waitForBlockNum=[%d]",
                itr.mgr.cpInfo.lastBlockNumber, blockNum)
            itr.mgr.cpInfoCond.Wait()
            logger.Debugf("Came out of wait. maxAvailaBlockNumber=[%d]", itr.mgr.cpInfo.lastBlockNumber)
        }
        return itr.mgr.cpInfo.lastBlockNumber
    }
    
    • In effect this wakes up every place that is waiting for a block, telling them the block they wanted has arrived and they can get back to work.
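To make the length-prefix framing and the tx-offset shift concrete, here is a small standalone sketch using the same proto.EncodeVarint helper as the code above (it assumes the github.com/golang/protobuf/proto package is on the module path); the numbers are made up for illustration.

package main

import (
   "fmt"

   "github.com/golang/protobuf/proto"
)

func main() {
   blockBytes := make([]byte, 300)                                     // stand-in for a serialized block
   blockBytesEncodedLen := proto.EncodeVarint(uint64(len(blockBytes))) // varint length prefix, as in addBlock

   // What actually lands in the block file is the prefix followed by the block bytes.
   record := append(blockBytesEncodedLen, blockBytes...)
   fmt.Println("bytes appended to the file:", len(record))

   // A tx recorded at offset 40 inside blockBytes starts at 40+len(prefix) within
   // the file record, which is exactly the shift addBlock applies to txOffsets.
   txOffsetInBlock := 40
   txOffsetInFile := txOffsetInBlock + len(blockBytesEncodedLen)
   fmt.Println("tx offset in block:", txOffsetInBlock, "-> in the file record:", txOffsetInFile)
}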
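And a conceptual sketch of the entries that indexBlock writes. The key strings and the fileLoc type here are simplified illustrations (the real keys are single-byte prefixes plus encoded values, and the real value is a marshalled fileLocPointer); which entries get written depends on the attributes enabled in IndexConfig.

package main

import "fmt"

// fileLoc is a simplified stand-in for fileLocPointer: which block file, at what offset.
type fileLoc struct {
   fileSuffixNum int
   offset        int
}

func main() {
   blockLoc := fileLoc{fileSuffixNum: 2, offset: 1024}   // where the whole block record starts
   txLoc := fileLoc{fileSuffixNum: 2, offset: 1024 + 58} // where one tx starts inside that record

   // Conceptual view of what a single indexBlock call may store in the index db.
   entries := map[string]fileLoc{
      "blockHash:ab12...":    blockLoc, // lookups by block hash
      "blockNum:37":          blockLoc, // lookups by block number
      "txID:3f9c...":         txLoc,    // lookups of a tx by its id
      "blockNumTranNum:37:0": txLoc,    // lookups of the n-th tx of a block
   }
   for k, v := range entries {
      fmt.Printf("%s -> blockfile_%06d @ %d\n", k, v.fileSuffixNum, v.offset)
   }
}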

Overview diagram

For reference only; generated from v1.0.


block-structure.png
