optee的RPC流程的代码详解
生活随笔
收集整理的這篇文章主要介紹了
optee的RPC流程的代码详解
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
文章目錄
- 1、在optee中發起RPC調用
- (1)、rpc_load
- (2)、thread_rpc_cmd
- (3)、thread_rpc
- 2、ATF code
- 3、tee driver中的switch調用(optee_do_call_with_arg)
- 4、tee-supplicant
★★★ 友情鏈接 : 個人博客導讀首頁—點擊此處 ★★★
1、在optee中發起RPC調用
(1)、rpc_load
thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params)
/*
 * Load a TA via RPC with UUID defined by input param @uuid. The virtual
 * address of the raw TA binary is received in out parameter @ta.
 */
static TEE_Result rpc_load(const TEE_UUID *uuid, struct shdr **ta,
			   uint64_t *cookie_ta, size_t *ta_size,
			   struct mobj **mobj)
{
	TEE_Result res;
	struct optee_msg_param params[2];
	uint64_t cta = 0;

	if (!uuid || !ta || !cookie_ta || !mobj || !ta_size)
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * First RPC round-trip: TMEM_OUTPUT with size 0 asks normal world
	 * only for the size of the TA binary (returned in params[1]).
	 */
	memset(params, 0, sizeof(params));
	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	tee_uuid_to_octets((void *)&params[0].u.value, uuid);
	params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
	params[1].u.tmem.buf_ptr = 0;
	params[1].u.tmem.size = 0;
	params[1].u.tmem.shm_ref = 0;

	res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
	if (res != TEE_SUCCESS)
		return res;

	/* Allocate a shared-memory payload big enough for the reported size */
	*mobj = thread_rpc_alloc_payload(params[1].u.tmem.size, &cta);
	if (!*mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	*ta = mobj_get_va(*mobj, 0);
	/* We don't expect NULL as thread_rpc_alloc_payload() was successful */
	assert(*ta);
	*cookie_ta = cta;
	*ta_size = params[1].u.tmem.size;

	/*
	 * Second RPC round-trip: same command, now with a real buffer so
	 * normal world (tee-supplicant) copies the TA binary into it.
	 */
	params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	tee_uuid_to_octets((void *)&params[0].u.value, uuid);
	msg_param_init_memparam(params + 1, *mobj, 0, params[1].u.tmem.size,
				cta, MSG_PARAM_MEM_DIR_OUT);

	res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
	if (res != TEE_SUCCESS)
		thread_rpc_free_payload(cta, *mobj);

	return res;
}
(2)、thread_rpc_cmd
thread_rpc(rpc_args);
/*
 * Issue one RPC command to normal world: marshal @params into the shared
 * message argument, trap out via thread_rpc(), then copy any OUTPUT/INOUT
 * parameters back into @params. Returns the normal-world result code.
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	size_t n;

	/* The source CRYPTO_RNG_SRC_JITTER_RPC is safe to use here */
	plat_prng_add_jitter_entropy(CRYPTO_RNG_SRC_JITTER_RPC,
				     &thread_rpc_pnum);

	if (!get_rpc_arg(cmd, num_params, &arg, &carg))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(arg->params, params, sizeof(*params) * num_params);

	/* Pack the 64-bit cookie into two 32-bit SMC args and trap out */
	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	/* Copy back only parameters that normal world may have written */
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			params[n] = arg->params[n];
			break;
		default:
			break;
		}
	}

	return arg->ret;
}
(3)、thread_rpc
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	/* Save callee-saved registers into the thread context */
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr			/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]		/* Get pointer to rv[] */
	mov	sp, x0			/* Switch to tmp stack */

	/* Suspend this thread; resume continues at .thread_rpc_return */
	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0			/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	/*
	 * Load rv[] into w0-w2 (original comment kept verbatim).
	 * NOTE(review): load_wregs x20, 0, 1, 3 reads rv[0..2] into
	 * w1-w3 — confirm against the macro definition in thread_a64.S.
	 */
	load_wregs x20, 0, 1, 3
	smc	#0
	b	.			/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point has the stack pointer been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they where when this
	 * function was originally entered.
	 */
	pop	x16, xzr		/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc
2、ATF code
	/*
	 * Excerpt of the opteed SMC dispatcher switch in ATF/TF-A —
	 * presumably services/spd/opteed/opteed_main.c; confirm against
	 * the TF-A tree in use.
	 *
	 * OPTEE is returning from a call or being preempted from a call, in
	 * either case execution should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_CALL_DONE:
		/*
		 * This is the result from the secure client of an
		 * earlier request. The results are in x0-x3. Copy it
		 * into the non-secure context, save the secure state
		 * and return to the non-secure state.
		 */
		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET4(ns_cpu_context, x1, x2, x3, x4);
3、tee driver中的switch調用(optee_do_call_with_arg)
在optee_do_call_with_arg函數中,會將cpu切換到TEE中,然后等待TEE返回.
如果從TEE返回的命令是RPC調用,則會走optee_handle_rpc流程,并通知完成量complete(&supp->reqs_c).
接著tee-supplicant就可以讀取到TEE反向傳來的數據,然后解析數據,進行相應的任務處理.
RPC處理
/*
 * Dispatch one RPC request from secure world. Decodes the RPC function
 * from param->a0 and either services it directly (shm alloc/free, foreign
 * interrupt) or forwards a command to handle_rpc_func_cmd(). On return,
 * param->a0 is set so optee_do_call_with_arg() re-enters secure world.
 */
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
		      struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			/* Return physical address and shm cookie in reg pairs */
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing, since they are handled in Linux a dummy RPC is
		 * performed to let Linux take the interrupt through the normal
		 * vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
handle_rpc_supp_cmd
/*
 * Forward an RPC command to the user-space tee-supplicant: convert the
 * message parameters to tee_param, queue the request for the supplicant
 * via optee_supp_thrd_req(), then convert results back. Errors are
 * reported through arg->ret / arg->ret_origin.
 */
static void handle_rpc_supp_cmd(struct tee_context *ctx,
				struct optee_msg_arg *arg)
{
	struct tee_param *params;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto out;
	}

	/* Blocks until tee-supplicant has processed the request */
	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

	if (optee_to_msg_param(arg->params, arg->num_params, params))
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
	kfree(params);
}
optee_supp_thrd_req
complete(&supp->reqs_c);
/*
 * Queue a request for the user-space supplicant and wait for its answer.
 * Wakes a supplicant blocked in optee_supp_recv() via supp->reqs_c, then
 * sleeps on req->c until the supplicant sends the result back. Returns
 * the TEEC_* result code.
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
	bool interruptable;
	u32 ret;

	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	req->in_queue = true;
	mutex_unlock(&supp->mutex);

	/* Tell an eventual waiter there's a new request */
	complete(&supp->reqs_c); /* signal the completion optee_supp_recv() waits on */

	/*
	 * Wait for supplicant to process and return result, once we've
	 * returned from wait_for_completion(&req->c) successfully we have
	 * exclusive access again.
	 */
	while (wait_for_completion_interruptible(&req->c)) {
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since the
			 * supp->mutex currently is held none can
			 * become available until the mutex released
			 * again.
			 *
			 * Interrupting an RPC to supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting then wouldn't make sense.
			 */
			if (req->in_queue) {
				list_del(&req->link);
				req->in_queue = false;
			}
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}
optee_supp_recv
wait_for_completion_interruptible(&supp->reqs_c)
/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where supplicant will be hanging most of
		 * the time, let's make this interruptible so we
		 * can easily restart supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can be
		 * processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}
tee-supplicant可以看做是一個守護進程,死循環調用ioctl,用于接受TEE側RPC反向調用傳來的命令,然后解析命令,發起相應的操作。
main—>process_one_request—>read_request—>ioctl(fd, TEE_IOC_SUPPL_RECV, &data)
/*
 * tee-supplicant entry point: open the TEE supplicant device, init the
 * supplicant filesystem backend, then loop serving RPC requests from
 * secure world until a request fails.
 */
int main(int argc, char *argv[])
{
	struct thread_arg arg = { .fd = -1 };
	int e;

	e = pthread_mutex_init(&arg.mutex, NULL);
	if (e) {
		EMSG("pthread_mutex_init: %s", strerror(e));
		EMSG("terminating...");
		exit(EXIT_FAILURE);
	}

	if (argc > 2)
		return usage();
	if (argc == 2) {
		arg.fd = open_dev(argv[1], &arg.gen_caps);
		if (arg.fd < 0) {
			EMSG("failed to open \"%s\"", argv[1]);
			exit(EXIT_FAILURE);
		}
	} else { /* no device argument given, take this path */
		/* opens the "/dev/teepriv0" node and returns its fd */
		arg.fd = get_dev_fd(&arg.gen_caps);
		if (arg.fd < 0) {
			EMSG("failed to find an OP-TEE supplicant device");
			exit(EXIT_FAILURE);
		}
	}

	if (tee_supp_fs_init() != 0) {
		EMSG("error tee_supp_fs_init");
		exit(EXIT_FAILURE);
	}

	while (!arg.abort) {
		/*
		 * Calls ioctl(fd, TEE_IOC_SUPPL_RECV, &data); blocks in the
		 * kernel driver waiting for the completion.
		 */
		if (!process_one_request(&arg))
			arg.abort = true;
	}

	close(arg.fd);

	return EXIT_FAILURE;
}

/*
 * Receive one RPC request from the driver, dispatch it by command to the
 * matching handler (TA load, FS, RPMB, shm, gprof, socket) and write the
 * response back. Returns false to make main() stop the loop.
 */
static bool process_one_request(struct thread_arg *arg)
{
	union tee_rpc_invoke request;
	size_t num_params;
	size_t num_meta;
	struct tee_ioctl_param *params;
	uint32_t func;
	uint32_t ret;

	DMSG("looping");
	memset(&request, 0, sizeof(request));
	request.recv.num_params = RPC_NUM_PARAMS;

	/* Let it be known that we can deal with meta parameters */
	params = (struct tee_ioctl_param *)(&request.send + 1);
	params->attr = TEE_IOCTL_PARAM_ATTR_META;

	num_waiters_inc(arg);

	if (!read_request(arg->fd, &request))
		return false;

	if (!find_params(&request, &func, &num_params, &params, &num_meta))
		return false;

	if (num_meta && !num_waiters_dec(arg) && !spawn_thread(arg))
		return false;

	switch (func) {
	case OPTEE_MSG_RPC_CMD_LOAD_TA:
		ret = load_ta(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_FS:
		ret = tee_supp_fs_process(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_RPMB:
		ret = process_rpmb(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		ret = process_alloc(arg, num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		ret = process_free(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_GPROF:
		ret = gprof_process(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_SOCKET:
		ret = tee_socket_process(num_params, params);
		break;
	default:
		EMSG("Cmd [0x%" PRIx32 "] not supported", func);
		/* Not supported. */
		ret = TEEC_ERROR_NOT_SUPPORTED;
		break;
	}

	request.send.ret = ret;

	return write_response(arg->fd, &request);
}

#### 5、tee driver的RPC等待

當tee-supplicant調用了ioctl的TEE_IOC_SUPPL_RECV后,對應的執行linux kernel驅動程序中的optee_supp_recv函數,
在optee_supp_recv中,程序會卡在wait_for_completion_interruptible(&supp->reqs_c)處,等待完成量通知,當CPU從TEE以RPC的方式切回
來時,才會complete此完成量

file_operations綁定到了dev/tee0、dev/teepriv0設備節點

static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = tee_ioctl,
};

/* Top-level ioctl dispatcher for the TEE character devices */
static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct tee_context *ctx = filp->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case TEE_IOC_VERSION:
		return tee_ioctl_version(ctx, uarg);
	case TEE_IOC_SHM_ALLOC:
		return tee_ioctl_shm_alloc(ctx, uarg);
	case TEE_IOC_SHM_REGISTER:
		return tee_ioctl_shm_register(ctx, uarg);
/*
 * Backport from upstreaming patch:
 * "tee: new ioctl to a register tee_shm from a dmabuf file descriptor"
 */
	case TEE_IOC_SHM_REGISTER_FD:
		return tee_ioctl_shm_register_fd(ctx, uarg);
/* End of backporting from upstreaming patch */
	case TEE_IOC_OPEN_SESSION:
		return tee_ioctl_open_session(ctx, uarg);
	case TEE_IOC_INVOKE:
		return tee_ioctl_invoke(ctx, uarg);
	case TEE_IOC_CANCEL:
		return tee_ioctl_cancel(ctx, uarg);
	case TEE_IOC_CLOSE_SESSION:
		return tee_ioctl_close_session(ctx, uarg);
	case TEE_IOC_SUPPL_RECV:
		return tee_ioctl_supp_recv(ctx, uarg);
	case TEE_IOC_SUPPL_SEND:
		return tee_ioctl_supp_send(ctx, uarg);
	default:
		return -EINVAL;
	}
}

/*
 * TEE_IOC_SUPPL_RECV handler: validate the user buffer, copy parameters
 * in, block in ->supp_recv() until a request arrives from secure world,
 * then copy the request out to tee-supplicant.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}

/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where supplicant will be hanging most of
		 * the time, let's make this interruptible so we
		 * can easily restart supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c)) /* waits here for the completion */
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can be
		 * processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}
總結
以上是生活随笔為你收集整理的optee的RPC流程的代码详解的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: optee的RPC设计(模型)详解
- 下一篇: optee对std smc的处理的详解