fTPM is short for firmware TPM: essentially, the work that would otherwise require a discrete TPM chip is done in firmware instead.
Its reference implementation is described at https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
Here we focus on the tpm_class_ops implementation in the kernel driver, drivers/char/tpm/tpm_ftpm_tee.c:
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
	.flags = TPM_OPS_AUTO_STARTUP,
	.recv = ftpm_tee_tpm_op_recv,
	.send = ftpm_tee_tpm_op_send,
	.cancel = ftpm_tee_tpm_op_cancel,
	.status = ftpm_tee_tpm_op_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = ftpm_tee_tpm_req_canceled,
};
tpm_class_ops represents the ability to send and receive TPM commands, regardless of whether the entity parsing those commands is firmware or a TPM chip.
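For context, this is how those ops get wired up: at probe time the driver allocates a tpm_chip bound to ftpm_tee_tpm_ops and registers it with the TPM core. Below is a condensed sketch of that path (based on ftpm_tee_probe in the same file; TEE context/session setup and error handling are omitted, so treat the details as approximate):

static int ftpm_tee_probe(struct device *dev)
{
	struct tpm_chip *chip;

	/*
	 * ... open a TEE context, open a session with the fTPM TA,
	 * and allocate the shared-memory buffer (pvt_data->shm) ...
	 */

	/* Allocate a chip whose send/recv path is ftpm_tee_tpm_ops */
	chip = tpmm_chip_alloc(dev, &ftpm_tee_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* The fTPM TA implements TPM 2.0 */
	chip->flags |= TPM_CHIP_FLAG_TPM2;

	/* Expose it to user space as a regular /dev/tpm%d device */
	return tpm_chip_register(chip);
}

Once the chip is registered, every TPM command issued by the core ends up in the send op below.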
static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
	size_t resp_len;
	int rc;
	u8 *temp_buf;
	struct tpm_header *resp_header;
	struct tee_ioctl_invoke_arg transceive_args;
	struct tee_param command_params[4];
	struct tee_shm *shm = pvt_data->shm;

	if (len > MAX_COMMAND_SIZE) {
		dev_err(&chip->dev,
			"%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
			__func__, len);
		return -EIO;
	}

	memset(&transceive_args, 0, sizeof(transceive_args));
	memset(command_params, 0, sizeof(command_params));
	pvt_data->resp_len = 0;
	/* Prepare the command to be sent to the TEE */
	/* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
	transceive_args = (struct tee_ioctl_invoke_arg) {
		.func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
		.session = pvt_data->session,
		.num_params = 4,
	};
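	/*
	 * FTPM_OPTEE_TA_SUBMIT_COMMAND is the function ID exposed by the
	 * fTPM TA, and pvt_data->session identifies the TA session the
	 * driver opened at probe time.
	 */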
	/* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
	command_params[0] = (struct tee_param) {
		.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
		.u.memref = {
			.shm = shm,
			.size = len,
			.shm_offs = 0,
		},
	};
	/* The TPM driver and the TEE exchange data through shared memory */
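	/*
	 * The buffer is allocated once, up front, and laid out as:
	 *   [0, MAX_COMMAND_SIZE)                   - command written by the driver
	 *   [MAX_COMMAND_SIZE, +MAX_RESPONSE_SIZE)  - response written by the fTPM TA
	 */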
	temp_buf = tee_shm_get_va(shm, 0);
	if (IS_ERR(temp_buf)) {
		dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
			__func__);
		return PTR_ERR(temp_buf);
	}
	memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
	memcpy(temp_buf, buf, len);

	command_params[1] = (struct tee_param) {
		.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
		.u.memref = {
			.shm = shm,
			.size = MAX_RESPONSE_SIZE,
			.shm_offs = MAX_COMMAND_SIZE,
		},
	};
	/*
	 * Trap into the TEE to process the command. Note that this call only
	 * returns once the TEE has finished, i.e. the command is handled
	 * synchronously.
	 */
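	/*
	 * tee_client_invoke_func() is the in-kernel TEE client API (declared
	 * in include/linux/tee_drv.h). The underlying TEE driver (e.g.
	 * OP-TEE) switches to the secure world, runs the fTPM TA, and only
	 * then returns to the caller.
	 */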
	rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
				    command_params);
	if ((rc < 0) || (transceive_args.ret != 0)) {
		dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
			__func__, transceive_args.ret);
		return (rc < 0) ? rc : transceive_args.ret;
	}
	/* Map the response that came back in shared memory */
	temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
	if (IS_ERR(temp_buf)) {
		dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
			__func__);
		return PTR_ERR(temp_buf);
	}
	/* Parse the returned response */
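	/*
	 * For reference, struct tpm_header from include/linux/tpm.h:
	 *
	 *   struct tpm_header {
	 *           __be16 tag;
	 *           __be32 length;   (big-endian, hence be32_to_cpu below)
	 *           union {
	 *                   __be32 ordinal;
	 *                   __be32 return_code;
	 *           };
	 *   } __packed;
	 */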
	resp_header = (struct tpm_header *)temp_buf;
	resp_len = be32_to_cpu(resp_header->length);

	/* sanity check resp_len */
	if (resp_len < TPM_HEADER_SIZE) {
		dev_err(&chip->dev, "%s: tpm response header too small\n",
			__func__);
		return -EIO;
	}
	if (resp_len > MAX_RESPONSE_SIZE) {
		dev_err(&chip->dev,
			"%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
			__func__, resp_len);
		return -EIO;
	}

	/* sanity checks look good, cache the response */
	memcpy(pvt_data->resp_buf, temp_buf, resp_len);
	pvt_data->resp_len = resp_len;

	return 0;
}
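The response cached in pvt_data->resp_buf is handed back to the TPM core by the matching recv op. A condensed sketch of ftpm_tee_tpm_op_recv from the same file (reconstructed from memory, so details are approximate):

static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
	size_t len = pvt_data->resp_len;

	/* The caller's buffer must be large enough for the cached response */
	if (count < len)
		return -EIO;

	/* Hand back the response that ftpm_tee_tpm_op_send() cached */
	memcpy(buf, pvt_data->resp_buf, len);
	pvt_data->resp_len = 0;

	return len;
}

Because send() already completes the command synchronously, the remaining ops (status, cancel) are essentially no-ops in this driver.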
From this we can see that a firmware TPM simply performs, inside the TEE, the work that would otherwise be done in a TPM chip. The main benefit of this approach is that it saves a discrete TPM chip, relying on the TEE's isolation instead of dedicated hardware.