baron · 3月28日 · 四川

optee的RPC流程的代码详解

快速连接

👉👉👉【精选】ARMv8/ARMv9架构入门到精通-目录 👈👈👈


(配图:ARMv8/ARMv9 架构系列示意图)

1、在optee中发起RPC调用

(1)、rpc_load

thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params)

/*
 * Load a TA via RPC with UUID defined by input param @uuid. The virtual
 * address of the raw TA binary is received in out parameter @ta.
 */
/*
 * Ask normal world (via RPC) to load the TA identified by @uuid.  Two
 * round trips are used: the first only queries the size of the TA
 * binary, the second passes a freshly allocated shared buffer that
 * tee-supplicant fills with the raw binary.  On success @ta points at
 * the binary, @ta_size holds its length and @mobj/@cookie_ta describe
 * the shared memory object (freed by the caller).
 */
static TEE_Result rpc_load(const TEE_UUID *uuid, struct shdr **ta,
               uint64_t *cookie_ta, size_t *ta_size,
               struct mobj **mobj)
{
    struct optee_msg_param params[2];
    uint64_t cookie = 0;
    size_t sz;
    TEE_Result res;

    /* All output pointers are mandatory */
    if (!uuid || !ta || !cookie_ta || !mobj || !ta_size)
        return TEE_ERROR_BAD_PARAMETERS;

    /*
     * Round trip 1: UUID plus a zero-sized tmem output parameter so
     * normal world reports the required size in params[1].u.tmem.size.
     */
    memset(params, 0, sizeof(params));
    params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
    tee_uuid_to_octets((void *)&params[0].u.value, uuid);
    params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;

    res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
    if (res != TEE_SUCCESS)
        return res;

    sz = params[1].u.tmem.size;

    /* Allocate shared memory large enough for the whole binary */
    *mobj = thread_rpc_alloc_payload(sz, &cookie);
    if (!*mobj)
        return TEE_ERROR_OUT_OF_MEMORY;

    *ta = mobj_get_va(*mobj, 0);
    /* A successful thread_rpc_alloc_payload() implies a mapped VA */
    assert(*ta);
    *cookie_ta = cookie;
    *ta_size = sz;

    /*
     * Round trip 2: same UUID, now with the shared buffer so the
     * supplicant can copy the TA binary into it.
     */
    params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
    tee_uuid_to_octets((void *)&params[0].u.value, uuid);
    msg_param_init_memparam(params + 1, *mobj, 0, sz, cookie,
                MSG_PARAM_MEM_DIR_OUT);

    res = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_LOAD_TA, 2, params);
    if (res != TEE_SUCCESS)
        thread_rpc_free_payload(cookie, *mobj);
    return res;
}
(2)、thread_rpc_cmd

thread_rpc(rpc_args);

/*
 * Issue one RPC command to normal world.  The caller's params are
 * staged into the shared optee_msg_arg buffer, the thread is suspended
 * via thread_rpc() while normal world services the request, and any
 * output/inout parameters are copied back before returning arg->ret.
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
            struct optee_msg_param *params)
{
    uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
    struct optee_msg_arg *arg = NULL;
    uint64_t carg = 0;
    size_t n;

    /* The source CRYPTO_RNG_SRC_JITTER_RPC is safe to use here */
    plat_prng_add_jitter_entropy(CRYPTO_RNG_SRC_JITTER_RPC,
                     &thread_rpc_pnum);

    if (!get_rpc_arg(cmd, num_params, &arg, &carg))
        return TEE_ERROR_OUT_OF_MEMORY;

    /* Stage the caller's parameters in the shared argument struct */
    memcpy(arg->params, params, sizeof(*params) * num_params);

    reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
    thread_rpc(rpc_args);

    /* Copy back every parameter normal world may have updated */
    for (n = 0; n < num_params; n++) {
        uint64_t attr = params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK;

        if (attr == OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT ||
            attr == OPTEE_MSG_ATTR_TYPE_VALUE_INOUT ||
            attr == OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT ||
            attr == OPTEE_MSG_ATTR_TYPE_RMEM_INOUT ||
            attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT ||
            attr == OPTEE_MSG_ATTR_TYPE_TMEM_INOUT)
            params[n] = arg->params[n];
    }

    return arg->ret;
}
(3)、thread_rpc

/*
 * void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
 *
 * Suspend the current secure thread and deliver an RPC to normal world
 * via an SMC (TEESMC_OPTEED_RETURN_CALL_DONE).  The thread's callee-saved
 * registers and stack pointer are saved into its thread context first;
 * execution resumes at .thread_rpc_return when normal world re-enters
 * OP-TEE to resume this thread, at which point the RPC results (w0-w5)
 * are written back into the caller's rv[] array.
 */
FUNC thread_rpc , :
    /* Read daif and create an SPSR */
    mrs    x1, daif
    orr    x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

    /* Mask all maskable exceptions before switching to temporary stack */
    msr    daifset, #DAIFBIT_ALL
    push    x0, xzr        /* Save rv[] pointer on the current stack */
    push    x1, x30        /* Save SPSR and return address */
    bl    thread_get_ctx_regs
    ldr    x30, [sp, #8]    /* Reload saved x30 (clobbered by the call) */
    store_xregs x0, THREAD_CTX_REGS_X19, 19, 30    /* Save callee-saved regs */
    mov    x19, x0        /* Keep ctx regs pointer across next call */

    bl    thread_get_tmp_sp
    pop    x1, xzr        /* Match "push x1, x30" above */
    mov    x2, sp
    str    x2, [x19, #THREAD_CTX_REGS_SP]    /* Record SP for resume */
    ldr    x20, [sp]    /* Get pointer to rv[] */
    mov    sp, x0        /* Switch to tmp stack */

    adr    x2, .thread_rpc_return    /* Resume point after RPC completes */
    mov    w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
    bl    thread_state_suspend
    mov    x4, x0        /* Supply thread index */
    ldr    w0, =TEESMC_OPTEED_RETURN_CALL_DONE
    load_wregs x20, 0, 1, 3    /* Load rv[0..2] into w1-w3 */
    smc    #0
    b    .        /* SMC should not return */

.thread_rpc_return:
    /*
     * At this point the stack pointer has been restored to the value
     * stored in THREAD_CTX above.
     *
     * Jumps here from thread_resume above when RPC has returned. The
     * IRQ and FIQ bits are restored to what they were when this
     * function was originally entered.
     */
    pop    x16, xzr    /* Get pointer to rv[] */
    store_wregs x16, 0, 0, 5    /* Store w0-w5 into rv[] */
    ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

2、ATF code

    /*
     * OPTEE is returning from a call or being preempted from a call, in
     * either case execution should resume in the normal world.
     */
    case TEESMC_OPTEED_RETURN_CALL_DONE:
        /*
         * This is the result from the secure client of an
         * earlier request. The results are in x0-x3. Copy it
         * into the non-secure context, save the secure state
         * and return to the non-secure state.
         */
        assert(handle == cm_get_context(SECURE));
        cm_el1_sysregs_context_save(SECURE);

        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /* Restore non-secure state */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

3、tee driver中的switch调用(optee_do_call_with_arg)

在optee_do_call_with_arg函数中,会将cpu切换到TEE中,然后等待TEE返回.
如果从TEE返回的命令是RPC调用,则会走optee_handle_rpc流程,并通知完成量complete(&supp->reqs_c).
接着tee-supplicant就可以读取到TEE反向传来的数据,然后解析数据,进行相应的任务处理.

/*
 * optee_do_call_with_arg() - enter secure world with the physical address
 * of a struct optee_msg_arg and loop until OP-TEE delivers a final return
 * code.  RPC requests coming back from secure world are serviced inline
 * via optee_handle_rpc() before re-entering OP-TEE.
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    struct optee_call_waiter waiter;
    struct optee_rpc_param param = { };
    struct optee_call_ctx call_ctx = { };
    u32 rc;

    param.a0 = OPTEE_SMC_CALL_WITH_ARG;
    reg_pair_from_64(&param.a1, &param.a2, parg);

    /* Queue up for a free secure world thread */
    optee_cq_wait_init(&optee->call_queue, &waiter);

    for (;;) {
        struct arm_smccc_res res;

        optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
                 param.a4, param.a5, param.a6, param.a7,
                 &res);

        if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
            /*
             * Secure world is out of threads, block until one
             * becomes available again.
             */
            optee_cq_wait_for_completion(&optee->call_queue,
                             &waiter);
        } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
            /* Service the RPC, then resume the secure thread */
            param.a0 = res.a0;
            param.a1 = res.a1;
            param.a2 = res.a2;
            param.a3 = res.a3;
            optee_handle_rpc(ctx, &param, &call_ctx);
        } else {
            /* Final return code from secure world */
            rc = res.a0;
            break;
        }
    }

    optee_rpc_finalize_call(&call_ctx);
    /*
     * We're done with our thread in secure world, if there are any
     * thread waiters wake one up.
     */
    optee_cq_wait_final(&optee->call_queue, &waiter);

    return rc;
}

RPC处理

void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
              struct optee_call_ctx *call_ctx)
{
    struct tee_device *teedev = ctx->teedev;
    struct optee *optee = tee_get_drvdata(teedev);
    struct tee_shm *shm;
    phys_addr_t pa;

    switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
    case OPTEE_SMC_RPC_FUNC_ALLOC:
        shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
        if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
            reg_pair_from_64(&param->a1, &param->a2, pa);
            reg_pair_from_64(&param->a4, &param->a5,
                     (unsigned long)shm);
        } else {
            param->a1 = 0;
            param->a2 = 0;
            param->a4 = 0;
            param->a5 = 0;
        }
        break;
    case OPTEE_SMC_RPC_FUNC_FREE:
        shm = reg_pair_to_ptr(param->a1, param->a2);
        tee_shm_free(shm);
        break;
    case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
        /*
         * A foreign interrupt was raised while secure world was
         * executing, since they are handled in Linux a dummy RPC is
         * performed to let Linux take the interrupt through the normal
         * vector.
         */
        break;
    case OPTEE_SMC_RPC_FUNC_CMD:
        shm = reg_pair_to_ptr(param->a1, param->a2);
        handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
        break;
    default:
        pr_warn("Unknown RPC func 0x%x\n",
            (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
        break;
    }

    param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}

handle_rpc_supp_cmd

static void handle_rpc_supp_cmd(struct tee_context *ctx,
                struct optee_msg_arg *arg)
{
    struct tee_param *params;

    arg->ret_origin = TEEC_ORIGIN_COMMS;

    params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
                   GFP_KERNEL);
    if (!params) {
        arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
        return;
    }

    if (optee_from_msg_param(params, arg->num_params, arg->params)) {
        arg->ret = TEEC_ERROR_BAD_PARAMETERS;
        goto out;
    }

    arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

    if (optee_to_msg_param(arg->params, arg->num_params, params))
        arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
    kfree(params);
}

optee_supp_thrd_req

complete(&supp->reqs_c);

/*
 * optee_supp_thrd_req() - queue one RPC request for tee-supplicant and
 * block until the supplicant has produced an answer.
 * @ctx:        driver context of the calling thread
 * @func:       supplicant function to invoke
 * @num_params: number of entries in @param
 * @param:      parameters for @func, updated in place by the supplicant
 *
 * Returns a TEEC_* result code that is passed back to secure world.
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
            struct tee_param *param)

{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    struct optee_supp *supp = &optee->supp;
    struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
    bool interruptable;
    u32 ret;

    if (!req)
        return TEEC_ERROR_OUT_OF_MEMORY;

    init_completion(&req->c);
    req->func = func;
    req->num_params = num_params;
    req->param = param;

    /* Insert the request in the request list */
    mutex_lock(&supp->mutex);
    list_add_tail(&req->link, &supp->reqs);
    req->in_queue = true;
    mutex_unlock(&supp->mutex);

    /* Tell an eventual waiter there's a new request */
    complete(&supp->reqs_c);    /* wakes a supplicant blocked in optee_supp_recv() */

    /*
     * Wait for supplicant to process and return result, once we've
     * returned from wait_for_completion(&req->c) successfully we have
     * exclusive access again.
     */
    while (wait_for_completion_interruptible(&req->c)) {
        /* Interrupted: only give up when no supplicant is attached */
        mutex_lock(&supp->mutex);
        interruptable = !supp->ctx;
        if (interruptable) {
            /*
             * There's no supplicant available and since the
             * supp->mutex currently is held none can
             * become available until the mutex released
             * again.
             *
             * Interrupting an RPC to supplicant is only
             * allowed as a way of slightly improving the user
             * experience in case the supplicant hasn't been
             * started yet. During normal operation the supplicant
             * will serve all requests in a timely manner and
             * interrupting then wouldn't make sense.
             */
            if (req->in_queue) {
                list_del(&req->link);
                req->in_queue = false;
            }
        }
        mutex_unlock(&supp->mutex);

        if (interruptable) {
            req->ret = TEEC_ERROR_COMMUNICATION;
            break;
        }
    }

    ret = req->ret;
    kfree(req);

    return ret;
}

optee_supp_recv

wait_for_completion_interruptible(&supp->reqs_c)


/**
 * optee_supp_recv() - receive a queued request for the supplicant
 * @ctx:        context receiving the request
 * @func:       requested function in supplicant
 * @num_params: number of elements allocated in @param, updated with the
 *              number of used elements
 * @param:      space for parameters for @func
 *
 * Blocks until a request from secure world is available, then copies it
 * into @param for tee-supplicant to process.
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
            struct tee_param *param)
{
    struct tee_device *teedev = ctx->teedev;
    struct optee *optee = tee_get_drvdata(teedev);
    struct optee_supp *supp = &optee->supp;
    struct optee_supp_req *req = NULL;
    size_t num_meta = 0;
    int id = 0;
    int rc;

    /* Validate the receive buffer and count leading meta parameters */
    rc = supp_check_recv_params(*num_params, param, &num_meta);
    if (rc)
        return rc;

    for (;;) {
        mutex_lock(&supp->mutex);
        req = supp_pop_entry(supp, *num_params - num_meta, &id);
        mutex_unlock(&supp->mutex);

        if (req) {
            if (IS_ERR(req))
                return PTR_ERR(req);
            break;
        }

        /*
         * No queued request: sleep on the completion instead of
         * spinning.  This is where the supplicant spends most of
         * its time; keep it interruptible so the supplicant can be
         * restarted easily.
         */
        if (wait_for_completion_interruptible(&supp->reqs_c))
            return -ERESTARTSYS;
    }

    if (num_meta) {
        /*
         * The supplicant understands meta parameters: hand back the
         * request id as a meta value so requests can be processed
         * asynchronously.
         */
        param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
                  TEE_IOCTL_PARAM_ATTR_META;
        param->u.value.a = id;
        param->u.value.b = 0;
        param->u.value.c = 0;
    } else {
        /* No meta support: record the single outstanding request id */
        mutex_lock(&supp->mutex);
        supp->req_id = id;
        mutex_unlock(&supp->mutex);
    }

    *func = req->func;
    *num_params = req->num_params + num_meta;
    memcpy(param + num_meta, req->param,
           sizeof(struct tee_param) * req->num_params);

    return 0;
}

4、tee-supplicant

tee-supplicant可以看做是一个守护进程,死循环调用ioctl,用于接收TEE侧RPC反向调用传来的命令,然后解析命令,发起相应的操作。

main--->process_one_request--->read_request--->ioctl(fd, TEE_IOC_SUPPL_RECV, &data)

int main(int argc, char *argv[])
{
    struct thread_arg arg = { .fd = -1 };
    int e;

    e = pthread_mutex_init(&arg.mutex, NULL);
    if (e) {
        EMSG("pthread_mutex_init: %s", strerror(e));
        EMSG("terminating...");
        exit(EXIT_FAILURE);
    }

    if (argc > 2)
        return usage();
    if (argc == 2) {
        arg.fd = open_dev(argv[1], &arg.gen_caps);
        if (arg.fd < 0) {
            EMSG("failed to open \"%s\"", argv[1]);
            exit(EXIT_FAILURE);
        }
    } else {
        //---------------------------------------------没有传参数,走这里
        arg.fd = get_dev_fd(&arg.gen_caps);--------------------打开"/dev/teepriv0"节点,返回fd
        if (arg.fd < 0) {
            EMSG("failed to find an OP-TEE supplicant device");
            exit(EXIT_FAILURE);
        }
    }

    if (tee_supp_fs_init() != 0) {
        EMSG("error tee_supp_fs_init");
        exit(EXIT_FAILURE);
    }

    while (!arg.abort) {
        if (!process_one_request(&arg))------在这里调用ioctl(fd, TEE_IOC_SUPPL_RECV, &data),在驱动程序中阻塞等待完成量
            arg.abort = true;
    }

    close(arg.fd);

    return EXIT_FAILURE;
}

/*
 * Receive one RPC request from the driver, dispatch it to the matching
 * handler and write the result back.  Returns false when the read or
 * write fails, which makes the caller stop the service loop.
 */
static bool process_one_request(struct thread_arg *arg)
{
    union tee_rpc_invoke request;
    struct tee_ioctl_param *params = NULL;
    size_t num_params = 0;
    size_t num_meta = 0;
    uint32_t func = 0;
    uint32_t rc = 0;

    DMSG("looping");
    memset(&request, 0, sizeof(request));
    request.recv.num_params = RPC_NUM_PARAMS;

    /* Advertise that we can deal with meta parameters */
    params = (struct tee_ioctl_param *)(&request.send + 1);
    params->attr = TEE_IOCTL_PARAM_ATTR_META;

    num_waiters_inc(arg);

    /* Blocks in the driver until secure world posts a request */
    if (!read_request(arg->fd, &request))
        return false;

    if (!find_params(&request, &func, &num_params, &params, &num_meta))
        return false;

    /* Keep at least one waiter thread available for new requests */
    if (num_meta && !num_waiters_dec(arg) && !spawn_thread(arg))
        return false;

    /* Dispatch on the RPC command requested by secure world */
    switch (func) {
    case OPTEE_MSG_RPC_CMD_LOAD_TA:
        rc = load_ta(num_params, params);
        break;
    case OPTEE_MSG_RPC_CMD_FS:
        rc = tee_supp_fs_process(num_params, params);
        break;
    case OPTEE_MSG_RPC_CMD_RPMB:
        rc = process_rpmb(num_params, params);
        break;
    case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
        rc = process_alloc(arg, num_params, params);
        break;
    case OPTEE_MSG_RPC_CMD_SHM_FREE:
        rc = process_free(num_params, params);
        break;
    case OPTEE_MSG_RPC_CMD_GPROF:
        rc = gprof_process(num_params, params);
        break;
    case OPTEE_MSG_RPC_CMD_SOCKET:
        rc = tee_socket_process(num_params, params);
        break;
    default:
        EMSG("Cmd [0x%" PRIx32 "] not supported", func);
        /* Not supported. */
        rc = TEEC_ERROR_NOT_SUPPORTED;
        break;
    }

    request.send.ret = rc;
    return write_response(arg->fd, &request);
}


5、tee driver的RPC等待

当tee-supplicant调用了ioctl的TEE_IOC_SUPPL_RECV后,对应的执行linux kernel驱动程序中的optee_supp_recv函数,
在optee_supp_recv中,程序会卡在wait_for_completion_interruptible(&supp->reqs_c)处,等待完成量通知,当CPU从TEE以RPC的方式切回
来时,才会complete此完成量

file_operations绑定到了dev/tee0、dev/teepriv0设备节点

/*
 * File operations bound to the /dev/tee<n> and /dev/teepriv<n> device
 * nodes; the same tee_ioctl() entry point serves both clients and the
 * supplicant (compat path included).
 */
static const struct file_operations tee_fops = {
    .owner = THIS_MODULE,
    .open = tee_open,
    .release = tee_release,
    .unlocked_ioctl = tee_ioctl,
    .compat_ioctl = tee_ioctl,
};

static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct tee_context *ctx = filp->private_data;
    void __user *uarg = (void __user *)arg;

    switch (cmd) {
    case TEE_IOC_VERSION:
        return tee_ioctl_version(ctx, uarg);
    case TEE_IOC_SHM_ALLOC:
        return tee_ioctl_shm_alloc(ctx, uarg);
    case TEE_IOC_SHM_REGISTER:
        return tee_ioctl_shm_register(ctx, uarg);
/*
 * Backport from upstreaming patch:
 * "tee: new ioctl to a register tee_shm from a dmabuf file descriptor"
 */
    case TEE_IOC_SHM_REGISTER_FD:
        return tee_ioctl_shm_register_fd(ctx, uarg);
/* End of backporting from upstreaming patch */
    case TEE_IOC_OPEN_SESSION:
        return tee_ioctl_open_session(ctx, uarg);
    case TEE_IOC_INVOKE:
        return tee_ioctl_invoke(ctx, uarg);
    case TEE_IOC_CANCEL:
        return tee_ioctl_cancel(ctx, uarg);
    case TEE_IOC_CLOSE_SESSION:
        return tee_ioctl_close_session(ctx, uarg);
    case TEE_IOC_SUPPL_RECV:
        return tee_ioctl_supp_recv(ctx, uarg);
    case TEE_IOC_SUPPL_SEND:
        return tee_ioctl_supp_send(ctx, uarg);
    default:
        return -EINVAL;
    }
}


/*
 * TEE_IOC_SUPPL_RECV handler: validate the user buffer, then call the
 * driver's supp_recv() hook (optee_supp_recv() for OP-TEE), which blocks
 * until secure world posts an RPC request, and finally copy the request
 * out to tee-supplicant.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
                   struct tee_ioctl_buf_data __user *ubuf)
{
    struct tee_iocl_supp_recv_arg __user *uarg = NULL;
    struct tee_ioctl_buf_data buf;
    struct tee_param *params = NULL;
    u32 num_params = 0;
    u32 func = 0;
    int rc = 0;

    /* Only drivers implementing supp_recv can serve this ioctl */
    if (!ctx->teedev->desc->ops->supp_recv)
        return -EINVAL;

    if (copy_from_user(&buf, ubuf, sizeof(buf)))
        return -EFAULT;

    /* Reject user buffers that are too small or unreasonably large */
    if (buf.buf_len > TEE_MAX_ARG_SIZE ||
        buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
        return -EINVAL;

    uarg = u64_to_user_ptr(buf.buf_ptr);
    if (get_user(num_params, &uarg->num_params))
        return -EFAULT;

    /* The buffer must exactly fit the header plus the param array */
    if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
        return -EINVAL;

    params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
    if (!params)
        return -ENOMEM;

    rc = params_from_user(ctx, params, num_params, uarg->params);
    if (rc)
        goto out;

    /* Blocks until a request from secure world is available */
    rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
    if (rc)
        goto out;

    if (put_user(func, &uarg->func) ||
        put_user(num_params, &uarg->num_params)) {
        rc = -EFAULT;
        goto out;
    }

    rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
    kfree(params);
    return rc;
}
/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:    context receiving the request
 * @func:    requested function in supplicant
 * @num_params:    number of elements allocated in @param, updated with number
 *        used elements
 * @param:    space for parameters for @func
 *
 * Called from the TEE_IOC_SUPPL_RECV ioctl; this is where tee-supplicant
 * sleeps until optee_supp_thrd_req() signals a new request.
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
            struct tee_param *param)
{
    struct tee_device *teedev = ctx->teedev;
    struct optee *optee = tee_get_drvdata(teedev);
    struct optee_supp *supp = &optee->supp;
    struct optee_supp_req *req = NULL;
    int id;
    size_t num_meta;
    int rc;

    /* Validate the receive buffer and count leading meta parameters */
    rc = supp_check_recv_params(*num_params, param, &num_meta);
    if (rc)
        return rc;

    while (true) {
        mutex_lock(&supp->mutex);
        req = supp_pop_entry(supp, *num_params - num_meta, &id);
        mutex_unlock(&supp->mutex);

        if (req) {
            if (IS_ERR(req))
                return PTR_ERR(req);
            break;
        }

        /*
         * If we didn't get a request we'll block in
         * wait_for_completion() to avoid needless spinning.
         *
         * This is where supplicant will be hanging most of
         * the time, let's make this interruptible so we
         * can easily restart supplicant if needed.
         */
        if (wait_for_completion_interruptible(&supp->reqs_c))  /* sleeps here until complete(&supp->reqs_c) */
            return -ERESTARTSYS;
    }

    if (num_meta) {
        /*
         * tee-supplicant supports meta parameters -> requests can be
         * processed asynchronously.
         */
        param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
                  TEE_IOCTL_PARAM_ATTR_META;
        param->u.value.a = id;
        param->u.value.b = 0;
        param->u.value.c = 0;
    } else {
        /* No meta support: record the single outstanding request id */
        mutex_lock(&supp->mutex);
        supp->req_id = id;
        mutex_unlock(&supp->mutex);
    }

    *func = req->func;
    *num_params = req->num_params + num_meta;
    memcpy(param + num_meta, req->param,
           sizeof(struct tee_param) * req->num_params);

    return 0;
}

关注"Arm精选"公众号,备注进ARM交流讨论区。
图片1.png

推荐阅读
关注数
9462
内容数
207
以易懂、渐进、有序的方式,深入探讨ARMv8/ARMv9架构的核心概念。我们将从基础知识开始,逐步深入,覆盖最新的架构,不再纠缠于过时技术。本系列内容包含但不限于ARM基础、SOC芯片基础、Trustzone、gic、异常和中断、AMBA、Cache、MMU等内容,并将持续更新。
目录
极术微信服务号
关注极术微信号
实时接收点赞提醒和评论通知
安谋科技学堂公众号
关注安谋科技学堂
实时获取安谋科技及 Arm 教学资源
安谋科技招聘公众号
关注安谋科技招聘
实时获取安谋科技中国职位信息