diff --git a/app/prj.conf b/app/prj.conf index 260488e7852b..5c22e19d6e0e 100644 --- a/app/prj.conf +++ b/app/prj.conf @@ -20,10 +20,9 @@ CONFIG_LOG=y CONFIG_LOG_PRINTK=y # Log processing is offloaded to a low-priority thread. CONFIG_LOG_MODE_DEFERRED=y -# Wake the low-priority log thread every 100ms and/or -# if more than 5 messages are queued by the frontend. -CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD=5 -CONFIG_LOG_PROCESS_THREAD_SLEEP_MS=100 +# Wake the low-priority log thread every time new log +# messages are available +CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD=1 # Frontend buffer must be large enough to cover all # typical bursts of log messages. CONFIG_LOG_BUFFER_SIZE=4096 diff --git a/posix/include/rtos/alloc.h b/posix/include/rtos/alloc.h index b8efac0d3112..84751ef9f1f4 100644 --- a/posix/include/rtos/alloc.h +++ b/posix/include/rtos/alloc.h @@ -97,29 +97,6 @@ static inline void *rballoc(uint32_t flags, size_t bytes) return rballoc_align(flags, bytes, PLATFORM_DCACHE_ALIGN); } -/** - * Changes size of the memory block allocated. - * @param ptr Address of the block to resize. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param bytes New size in bytes. - * @param old_bytes Old size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the resized memory of NULL if failed. - */ -void *rbrealloc_align(void *ptr, uint32_t flags, size_t bytes, - size_t old_bytes, uint32_t alignment); - -/** - * Similar to rballoc_align(), returns resized buffer aligned to - * PLATFORM_DCACHE_ALIGN. - */ -static inline void *rbrealloc(void *ptr, uint32_t flags, - size_t bytes, size_t old_bytes) -{ - return rbrealloc_align(ptr, flags, bytes, old_bytes, - PLATFORM_DCACHE_ALIGN); -} - /** * Frees the memory block. * @param ptr Pointer to the memory block. 
@@ -141,6 +118,7 @@ void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, size_t alignment); void sof_heap_free(struct k_heap *heap, void *addr); struct k_heap *sof_sys_heap_get(void); +struct k_heap *sof_sys_user_heap_get(void); /** * Calculates length of the null-terminated string. diff --git a/posix/include/sof/lib/dma.h b/posix/include/sof/lib/dma.h index 960cfd469215..cb829dcaa21a 100644 --- a/posix/include/sof/lib/dma.h +++ b/posix/include/sof/lib/dma.h @@ -35,6 +35,7 @@ struct comp_buffer; struct comp_dev; +struct k_heap; /** \addtogroup sof_dma_drivers DMA Drivers * DMA Drivers API specification. @@ -511,13 +512,14 @@ static inline void dma_sg_init(struct dma_sg_elem_array *ea) ea->elems = NULL; } -int dma_sg_alloc(struct dma_sg_elem_array *ea, +int dma_sg_alloc(struct k_heap *heap, + struct dma_sg_elem_array *ea, uint32_t flags, uint32_t direction, uint32_t buffer_count, uint32_t buffer_bytes, uintptr_t dma_buffer_addr, uintptr_t external_addr); -void dma_sg_free(struct dma_sg_elem_array *ea); +void dma_sg_free(struct k_heap *heap, struct dma_sg_elem_array *ea); /** * \brief Get the total size of SG buffer diff --git a/src/audio/buffers/comp_buffer.c b/src/audio/buffers/comp_buffer.c index d6562b8d821f..77471194dfa6 100644 --- a/src/audio/buffers/comp_buffer.c +++ b/src/audio/buffers/comp_buffer.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -147,28 +146,17 @@ static void comp_buffer_free(struct sof_audio_buffer *audio_buffer) struct comp_buffer *buffer = container_of(audio_buffer, struct comp_buffer, audio_buffer); - struct buffer_cb_free cb_data = { - .buffer = buffer, - }; - buf_dbg(buffer, "buffer_free()"); - notifier_event(buffer, NOTIFIER_ID_BUFFER_FREE, - NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data)); - - /* In case some listeners didn't unregister from buffer's callbacks */ - notifier_unregister_all(NULL, buffer); +#if CONFIG_PROBE + if (buffer->probe_cb_free) + 
buffer->probe_cb_free(buffer->probe_cb_arg); +#endif struct k_heap *heap = buffer->audio_buffer.heap; - rfree(buffer->stream.addr); + sof_heap_free(heap, buffer->stream.addr); sof_heap_free(heap, buffer); - if (heap) { - struct dp_heap_user *mod_heap_user = container_of(heap, struct dp_heap_user, heap); - - if (!--mod_heap_user->client_count) - rfree(mod_heap_user); - } } APP_TASK_DATA static const struct source_ops comp_buffer_source_ops = { @@ -218,6 +206,7 @@ static struct comp_buffer *buffer_alloc_struct(struct k_heap *heap, memset(buffer, 0, sizeof(*buffer)); + buffer->heap = heap; buffer->flags = flags; /* Force channels to 2 for init to prevent bad call to clz in buffer_init_stream */ buffer->stream.runtime_stream_params.channels = 2; @@ -254,7 +243,7 @@ struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flag return NULL; } - stream_addr = rballoc_align(flags, size, align); + stream_addr = sof_heap_alloc(heap, flags, size, align); if (!stream_addr) { tr_err(&buffer_tr, "could not alloc size = %zu bytes of flags = 0x%x", size, flags); @@ -264,9 +253,11 @@ struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flag buffer = buffer_alloc_struct(heap, stream_addr, size, flags, is_shared); if (!buffer) { tr_err(&buffer_tr, "could not alloc buffer structure"); - rfree(stream_addr); + sof_heap_free(heap, stream_addr); } + buffer->heap = heap; + return buffer; } @@ -292,7 +283,7 @@ struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_siz preferred_size += minimum_size - preferred_size % minimum_size; for (size = preferred_size; size >= minimum_size; size -= minimum_size) { - stream_addr = rballoc_align(flags, size, align); + stream_addr = sof_heap_alloc(heap, flags, size, align); if (stream_addr) break; } @@ -308,9 +299,11 @@ struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_siz buffer = buffer_alloc_struct(heap, stream_addr, size, flags, is_shared); if (!buffer) { 
tr_err(&buffer_tr, "could not alloc buffer structure"); - rfree(stream_addr); + sof_heap_free(heap, stream_addr); } + buffer->heap = heap; + return buffer; } @@ -341,14 +334,8 @@ int buffer_set_size(struct comp_buffer *buffer, uint32_t size, uint32_t alignmen if (size == audio_stream_get_size(&buffer->stream)) return 0; - if (!alignment) - new_ptr = rbrealloc(audio_stream_get_addr(&buffer->stream), - buffer->flags | SOF_MEM_FLAG_NO_COPY, - size, audio_stream_get_size(&buffer->stream)); - else - new_ptr = rbrealloc_align(audio_stream_get_addr(&buffer->stream), - buffer->flags | SOF_MEM_FLAG_NO_COPY, size, - audio_stream_get_size(&buffer->stream), alignment); + new_ptr = sof_heap_alloc(buffer->heap, buffer->flags, size, alignment); + /* we couldn't allocate bigger chunk */ if (!new_ptr && size > audio_stream_get_size(&buffer->stream)) { buf_err(buffer, "resize can't alloc %u bytes of flags 0x%x", @@ -357,8 +344,10 @@ int buffer_set_size(struct comp_buffer *buffer, uint32_t size, uint32_t alignmen } /* use bigger chunk, else just use the old chunk but set smaller */ - if (new_ptr) + if (new_ptr) { + sof_heap_free(buffer->heap, audio_stream_get_addr(&buffer->stream)); buffer->stream.addr = new_ptr; + } buffer_init_stream(buffer, size); @@ -389,22 +378,11 @@ int buffer_set_size_range(struct comp_buffer *buffer, size_t preferred_size, siz if (preferred_size == actual_size) return 0; - if (!alignment) { - for (new_size = preferred_size; new_size >= minimum_size; - new_size -= minimum_size) { - new_ptr = rbrealloc(ptr, buffer->flags | SOF_MEM_FLAG_NO_COPY, - new_size, actual_size); - if (new_ptr) - break; - } - } else { - for (new_size = preferred_size; new_size >= minimum_size; - new_size -= minimum_size) { - new_ptr = rbrealloc_align(ptr, buffer->flags | SOF_MEM_FLAG_NO_COPY, - new_size, actual_size, alignment); - if (new_ptr) - break; - } + for (new_size = preferred_size; new_size >= minimum_size; + new_size -= minimum_size) { + new_ptr = sof_heap_alloc(buffer->heap, 
buffer->flags, new_size, alignment); + if (new_ptr) + break; } /* we couldn't allocate bigger chunk */ @@ -415,8 +393,10 @@ int buffer_set_size_range(struct comp_buffer *buffer, size_t preferred_size, siz } /* use bigger chunk, else just use the old chunk but set smaller */ - if (new_ptr) + if (new_ptr) { + sof_heap_free(buffer->heap, audio_stream_get_addr(&buffer->stream)); buffer->stream.addr = new_ptr; + } buffer_init_stream(buffer, new_size); @@ -478,12 +458,6 @@ bool buffer_params_match(struct comp_buffer *buffer, void comp_update_buffer_produce(struct comp_buffer *buffer, uint32_t bytes) { - struct buffer_cb_transact cb_data = { - .buffer = buffer, - .transaction_amount = bytes, - .transaction_begin_address = audio_stream_get_wptr(&buffer->stream), - }; - /* return if no bytes */ if (!bytes) { #if CONFIG_SOF_LOG_DBG_BUFFER @@ -499,10 +473,19 @@ void comp_update_buffer_produce(struct comp_buffer *buffer, uint32_t bytes) return; } - audio_stream_produce(&buffer->stream, bytes); +#if CONFIG_PROBE + if (buffer->probe_cb_produce) { + struct buffer_cb_transact cb_data = { + .buffer = buffer, + .transaction_amount = bytes, + .transaction_begin_address = audio_stream_get_wptr(&buffer->stream), + }; - notifier_event(buffer, NOTIFIER_ID_BUFFER_PRODUCE, - NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data)); + buffer->probe_cb_produce(buffer->probe_cb_arg, &cb_data); + } +#endif + + audio_stream_produce(&buffer->stream, bytes); #if CONFIG_SOF_LOG_DBG_BUFFER buf_dbg(buffer, "((buffer->avail << 16) | buffer->free) = %08x, ((buffer->id << 16) | buffer->size) = %08x", @@ -519,12 +502,6 @@ void comp_update_buffer_produce(struct comp_buffer *buffer, uint32_t bytes) void comp_update_buffer_consume(struct comp_buffer *buffer, uint32_t bytes) { - struct buffer_cb_transact cb_data = { - .buffer = buffer, - .transaction_amount = bytes, - .transaction_begin_address = audio_stream_get_rptr(&buffer->stream), - }; - CORE_CHECK_STRUCT(&buffer->audio_buffer); /* return if no bytes */ 
@@ -544,9 +521,6 @@ void comp_update_buffer_consume(struct comp_buffer *buffer, uint32_t bytes) audio_stream_consume(&buffer->stream, bytes); - notifier_event(buffer, NOTIFIER_ID_BUFFER_CONSUME, - NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data)); - #if CONFIG_SOF_LOG_DBG_BUFFER buf_dbg(buffer, "(buffer->avail << 16) | buffer->free = %08x, (buffer->id << 16) | buffer->size = %08x, (buffer->r_ptr - buffer->addr) << 16 | (buffer->w_ptr - buffer->addr)) = %08x", (audio_stream_get_avail_bytes(&buffer->stream) << 16) | diff --git a/src/audio/component.c b/src/audio/component.c index 941be20f8534..ce98f6428953 100644 --- a/src/audio/component.c +++ b/src/audio/component.c @@ -36,7 +36,7 @@ LOG_MODULE_REGISTER(component, CONFIG_SOF_LOG_LEVEL); -static SHARED_DATA struct comp_driver_list cd; +static APP_SYSUSER_BSS SHARED_DATA struct comp_driver_list cd; SOF_DEFINE_REG_UUID(component); @@ -699,3 +699,11 @@ void comp_update_ibs_obs_cpc(struct comp_dev *dev) #endif } +#ifdef CONFIG_SOF_USERSPACE_LL +void comp_grant_access_to_thread(const struct comp_dev *dev, struct k_thread *th) +{ + assert(dev->list_mutex); + tr_dbg(&ipc_tr, "grant access to mutex %p for thread %p", dev->list_mutex, th); + k_thread_access_grant(th, dev->list_mutex); +} +#endif diff --git a/src/audio/copier/copier.c b/src/audio/copier/copier.c index 0468664c0634..b25a7d69056a 100644 --- a/src/audio/copier/copier.c +++ b/src/audio/copier/copier.c @@ -1187,7 +1187,7 @@ __cold static int copier_unbind(struct processing_module *mod, struct bind_info return 0; } -static struct module_endpoint_ops copier_endpoint_ops = { +static APP_TASK_DATA const struct module_endpoint_ops copier_endpoint_ops = { .get_total_data_processed = copier_get_processed_data, .position = copier_position, .dai_ts_config = copier_dai_ts_config_op, @@ -1198,7 +1198,7 @@ static struct module_endpoint_ops copier_endpoint_ops = { .trigger = copier_comp_trigger }; -static const struct module_interface copier_interface = { +static 
APP_TASK_DATA const struct module_interface copier_interface = { .init = copier_init, .prepare = copier_prepare, .process_audio_stream = copier_process, diff --git a/src/audio/copier/copier_dai.c b/src/audio/copier/copier_dai.c index ceb93b2a22bf..1e82339556ba 100644 --- a/src/audio/copier/copier_dai.c +++ b/src/audio/copier/copier_dai.c @@ -208,6 +208,8 @@ __cold static int copier_dai_init(struct comp_dev *dev, if (!dd) return -ENOMEM; memset(dd, 0, sizeof(*dd)); + dd->chan_index = -1; + comp_info(dev, "dd %p initialized, index %d", dd, dd->chan_index); ret = dai_common_new(dd, dev, dai); if (ret < 0) diff --git a/src/audio/copier/host_copier.h b/src/audio/copier/host_copier.h index 28605ed9c3ab..5192506da42f 100644 --- a/src/audio/copier/host_copier.h +++ b/src/audio/copier/host_copier.h @@ -50,10 +50,11 @@ struct host_data { /* local DMA config */ #if CONFIG_ZEPHYR_NATIVE_DRIVERS struct sof_dma *dma; + int chan_index; #else struct dma *dma; -#endif struct dma_chan_data *chan; +#endif struct dma_sg_config config; #ifdef __ZEPHYR__ struct dma_config z_config; @@ -112,6 +113,7 @@ struct host_data { uint64_t next_sync; uint64_t period_in_cycles; #endif + struct k_heap *heap; }; int host_common_new(struct host_data *hd, struct comp_dev *dev, diff --git a/src/audio/dai-legacy.c b/src/audio/dai-legacy.c index 11179334e6fe..655fa94afb86 100644 --- a/src/audio/dai-legacy.c +++ b/src/audio/dai-legacy.c @@ -379,7 +379,7 @@ static int dai_playback_params(struct comp_dev *dev, uint32_t period_bytes, comp_info(dev, "fifo 0x%x", fifo); - err = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &config->elem_array, SOF_MEM_FLAG_USER, config->direction, period_count, period_bytes, @@ -444,7 +444,7 @@ static int dai_capture_params(struct comp_dev *dev, uint32_t period_bytes, comp_info(dev, "fifo 0x%x", fifo); - err = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &config->elem_array, SOF_MEM_FLAG_USER, 
config->direction, period_count, period_bytes, @@ -709,7 +709,7 @@ void dai_common_reset(struct dai_data *dd, struct comp_dev *dev) if (!dd->delayed_dma_stop) dai_dma_release(dd, dev); - dma_sg_free(&config->elem_array); + dma_sg_free(NULL, &config->elem_array); if (dd->dma_buffer) { buffer_free(dd->dma_buffer); diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index 37b0b7c911ea..29c4cbd379b4 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -201,55 +202,62 @@ __cold int dai_set_config(struct dai *dai, struct ipc_config_dai *common_config, /* called from ipc/ipc3/dai.c */ int dai_get_handshake(struct dai *dai, int direction, int stream_id) { - k_spinlock_key_t key = k_spin_lock(&dai->lock); - const struct dai_properties *props = dai_get_properties(dai->dev, direction, - stream_id); - int hs_id = props->dma_hs_id; + struct dai_properties props; + int ret; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + ret = dai_get_properties_copy(dai->dev, direction, stream_id, &props); + k_mutex_unlock(dai->lock); + if (ret < 0) + return ret; - return hs_id; + return props.dma_hs_id; } /* called from ipc/ipc3/dai.c and ipc/ipc4/dai.c */ int dai_get_fifo_depth(struct dai *dai, int direction) { - const struct dai_properties *props; - k_spinlock_key_t key; - int fifo_depth; + struct dai_properties props; + int ret; if (!dai) return 0; - key = k_spin_lock(&dai->lock); - props = dai_get_properties(dai->dev, direction, 0); - fifo_depth = props->fifo_depth; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + ret = dai_get_properties_copy(dai->dev, direction, 0, &props); + k_mutex_unlock(dai->lock); + if (ret < 0) + return 0; - return fifo_depth; + return props.fifo_depth; } int dai_get_stream_id(struct dai *dai, int direction) { - k_spinlock_key_t key = k_spin_lock(&dai->lock); - const struct dai_properties *props = 
dai_get_properties(dai->dev, direction, 0); - int stream_id = props->stream_id; + struct dai_properties props; + int ret; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + ret = dai_get_properties_copy(dai->dev, direction, 0, &props); + k_mutex_unlock(dai->lock); + if (ret < 0) + return ret; - return stream_id; + return props.stream_id; } static int dai_get_fifo(struct dai *dai, int direction, int stream_id) { - k_spinlock_key_t key = k_spin_lock(&dai->lock); - const struct dai_properties *props = dai_get_properties(dai->dev, direction, - stream_id); - int fifo_address = props->fifo_address; + struct dai_properties props; + int ret; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + ret = dai_get_properties_copy(dai->dev, direction, stream_id, &props); + k_mutex_unlock(dai->lock); + if (ret < 0) + return ret; - return fifo_address; + return props.fifo_address; } /* this is called by DMA driver every time descriptor has completed */ @@ -499,11 +507,33 @@ __cold int dai_common_new(struct dai_data *dd, struct comp_dev *dev, return -ENODEV; } - k_spinlock_init(&dd->dai->lock); +#ifdef CONFIG_SOF_USERSPACE_LL + dd->dai->lock = k_object_alloc(K_OBJ_MUTEX); + comp_set_drvdata(dev, dd); +#else + dd->dai->lock = &dd->dai->lock_obj; +#endif + k_mutex_init(dd->dai->lock); + LOG_INF("dai mutex %p", dd->dai->lock); + +#ifdef CONFIG_SOF_USERSPACE_LL + /* Temporary: give access to audio and IPC threads */ + k_thread_access_grant(ipc_get()->ipc_user_pdata->thread, dd->dai->lock); + k_thread_access_grant(ipc_get()->ipc_user_pdata->audio_thread, dd->dai->lock); + LOG_INF("gave access to DAI mutex to threads IPC %p audio %p", ipc_get()->ipc_user_pdata->thread, ipc_get()->ipc_user_pdata->audio_thread); +#endif dma_sg_init(&dd->config.elem_array); dd->xrun = 0; - dd->chan = NULL; + +#ifdef CONFIG_SOF_USERSPACE_LL + /* + * copier_dai_create() uses mod_zalloc() to allocate + * the 'dd' dai data object and does not set dd->heap. 
+ * If LL is run in user-space, assign the 'heap' here. + */ + dd->heap = zephyr_ll_user_heap(); +#endif /* I/O performance init, keep it last so the function does not reach this in case * of return on error, so that we do not waste a slot @@ -557,6 +587,7 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, struct comp_dev *dev; const struct ipc_config_dai *dai_cfg = spec; struct dai_data *dd; + struct k_heap *heap = NULL; int ret; assert_can_be_cold(); @@ -569,10 +600,19 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, dev->ipc_config = *config; - dd = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*dd)); +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif + + dd = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*dd), 0); if (!dd) goto e_data; + memset(dd, 0, sizeof(*dd)); + dd->heap = heap; + dd->chan_index = -1; + comp_info(dev, "dd %p initialized, index %d", dd, dd->chan_index); + comp_set_drvdata(dev, dd); ret = dai_common_new(dd, dev, dai_cfg); @@ -586,7 +626,7 @@ __cold static struct comp_dev *dai_new(const struct comp_driver *drv, return dev; error: - rfree(dd); + sof_heap_free(dd->heap, dd); e_data: comp_free_device(dev); return NULL; @@ -603,18 +643,20 @@ __cold void dai_common_free(struct dai_data *dd) if (dd->group) dai_group_put(dd->group); - if (dd->chan) { - sof_dma_release_channel(dd->dma, dd->chan->index); - dd->chan->dev_data = NULL; - } + if (dd->chan_index != -1) + sof_dma_release_channel(dd->dma, dd->chan_index); sof_dma_put(dd->dma); dai_release_llp_slot(dd); +#ifdef CONFIG_SOF_USERSPACE_LL + k_object_free(dd->dai->lock); +#endif + dai_put(dd->dai); - rfree(dd->dai_spec_config); + sof_heap_free(dd->heap, dd->dai_spec_config); } __cold static void dai_free(struct comp_dev *dev) @@ -628,7 +670,7 @@ __cold static void dai_free(struct comp_dev *dev) dai_common_free(dd); - rfree(dd); + sof_heap_free(dd->heap, dd); comp_free_device(dev); } @@ -823,7 
+865,7 @@ static int dai_set_sg_config(struct dai_data *dd, struct comp_dev *dev, uint32_t } while (--max_block_count > 0); } - err = dma_sg_alloc(&config->elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(dd->heap, &config->elem_array, SOF_MEM_FLAG_USER, config->direction, period_count, period_bytes, @@ -849,8 +891,9 @@ static int dai_set_dma_config(struct dai_data *dd, struct comp_dev *dev) comp_dbg(dev, "entry"); - dma_cfg = rballoc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, - sizeof(struct dma_config)); + dma_cfg = sof_heap_alloc(dd->heap, + SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, + sizeof(struct dma_config), 0); if (!dma_cfg) { comp_err(dev, "dma_cfg allocation failed"); return -ENOMEM; @@ -879,10 +922,11 @@ static int dai_set_dma_config(struct dai_data *dd, struct comp_dev *dev) else dma_cfg->dma_slot = config->src_dev; - dma_block_cfg = rballoc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, - sizeof(struct dma_block_config) * dma_cfg->block_count); + dma_block_cfg = sof_heap_alloc(dd->heap, + SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, + sizeof(struct dma_block_config) * dma_cfg->block_count, 0); if (!dma_block_cfg) { - rfree(dma_cfg); + sof_heap_free(dd->heap, dma_cfg); comp_err(dev, "dma_block_config allocation failed"); return -ENOMEM; } @@ -1016,7 +1060,7 @@ static int dai_set_dma_buffer(struct dai_data *dd, struct comp_dev *dev, return err; } } else { - dd->dma_buffer = buffer_alloc_range(NULL, buffer_size_preferred, buffer_size, + dd->dma_buffer = buffer_alloc_range(dd->heap, buffer_size_preferred, buffer_size, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_DMA, addr_align, BUFFER_USAGE_NOT_SHARED); if (!dd->dma_buffer) { @@ -1104,8 +1148,8 @@ int dai_common_params(struct dai_data *dd, struct comp_dev *dev, if (err < 0) { buffer_free(dd->dma_buffer); dd->dma_buffer = NULL; - dma_sg_free(&config->elem_array); - rfree(dd->z_config); + dma_sg_free(dd->heap, &config->elem_array); + 
sof_heap_free(dd->heap, dd->z_config); dd->z_config = NULL; } @@ -1136,9 +1180,9 @@ int dai_common_config_prepare(struct dai_data *dd, struct comp_dev *dev) return -EINVAL; } - if (dd->chan) { + if (dd->chan_index != -1) { comp_info(dev, "dma channel index %d already configured", - dd->chan->index); + dd->chan_index); return 0; } @@ -1152,18 +1196,14 @@ int dai_common_config_prepare(struct dai_data *dd, struct comp_dev *dev) } /* get DMA channel */ - channel = sof_dma_request_channel(dd->dma, channel); - if (channel < 0) { + dd->chan_index = sof_dma_request_channel(dd->dma, channel); + if (dd->chan_index < 0) { comp_err(dev, "dma_request_channel() failed"); - dd->chan = NULL; return -EIO; } - dd->chan = &dd->dma->chan[channel]; - dd->chan->dev_data = dd; - comp_dbg(dev, "new configured dma channel index %d", - dd->chan->index); + dd->chan_index); return 0; } @@ -1174,8 +1214,8 @@ int dai_common_prepare(struct dai_data *dd, struct comp_dev *dev) dd->total_data_processed = 0; - if (!dd->chan) { - comp_err(dev, "Missing dd->chan."); + if (dd->chan_index == -1) { + comp_err(dev, "Missing dd->chan_index."); comp_set_state(dev, COMP_TRIGGER_RESET); return -EINVAL; } @@ -1196,7 +1236,7 @@ int dai_common_prepare(struct dai_data *dd, struct comp_dev *dev) return 0; } - ret = sof_dma_config(dd->chan->dma, dd->chan->index, dd->z_config); + ret = sof_dma_config(dd->dma, dd->chan_index, dd->z_config); if (ret < 0) comp_set_state(dev, COMP_TRIGGER_RESET); @@ -1235,10 +1275,10 @@ void dai_common_reset(struct dai_data *dd, struct comp_dev *dev) if (!dd->delayed_dma_stop) dai_dma_release(dd, dev); - dma_sg_free(&config->elem_array); + dma_sg_free(dd->heap, &config->elem_array); if (dd->z_config) { - rfree(dd->z_config->head_block); - rfree(dd->z_config); + sof_heap_free(dd->heap, dd->z_config->head_block); + sof_heap_free(dd->heap, dd->z_config); dd->z_config = NULL; } @@ -1283,7 +1323,7 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, /* only 
start the DAI if we are not XRUN handling */ if (dd->xrun == 0) { - ret = sof_dma_start(dd->chan->dma, dd->chan->index); + ret = sof_dma_start(dd->dma, dd->chan_index); if (ret < 0) return ret; @@ -1321,16 +1361,16 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, /* only start the DAI if we are not XRUN handling */ if (dd->xrun == 0) { /* recover valid start position */ - ret = sof_dma_stop(dd->chan->dma, dd->chan->index); + ret = sof_dma_stop(dd->dma, dd->chan_index); if (ret < 0) return ret; /* dma_config needed after stop */ - ret = sof_dma_config(dd->chan->dma, dd->chan->index, dd->z_config); + ret = sof_dma_config(dd->dma, dd->chan_index, dd->z_config); if (ret < 0) return ret; - ret = sof_dma_start(dd->chan->dma, dd->chan->index); + ret = sof_dma_start(dd->dma, dd->chan_index); if (ret < 0) return ret; @@ -1358,11 +1398,11 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, * as soon as possible. */ #if CONFIG_COMP_DAI_STOP_TRIGGER_ORDER_REVERSE - ret = sof_dma_stop(dd->chan->dma, dd->chan->index); + ret = sof_dma_stop(dd->dma, dd->chan_index); dai_trigger_op(dd->dai, cmd, dev->direction); #else dai_trigger_op(dd->dai, cmd, dev->direction); - ret = sof_dma_stop(dd->chan->dma, dd->chan->index); + ret = sof_dma_stop(dd->dma, dd->chan_index); if (ret) { comp_warn(dev, "dma was stopped earlier"); ret = 0; @@ -1372,11 +1412,11 @@ static int dai_comp_trigger_internal(struct dai_data *dd, struct comp_dev *dev, case COMP_TRIGGER_PAUSE: comp_dbg(dev, "PAUSE"); #if CONFIG_COMP_DAI_STOP_TRIGGER_ORDER_REVERSE - ret = sof_dma_suspend(dd->chan->dma, dd->chan->index); + ret = sof_dma_suspend(dd->dma, dd->chan_index); dai_trigger_op(dd->dai, cmd, dev->direction); #else dai_trigger_op(dd->dai, cmd, dev->direction); - ret = sof_dma_suspend(dd->chan->dma, dd->chan->index); + ret = sof_dma_suspend(dd->dma, dd->chan_index); #endif break; case COMP_TRIGGER_PRE_START: @@ -1474,7 +1514,7 @@ static int 
dai_comp_trigger(struct comp_dev *dev, int cmd) */ static int dai_get_status(struct comp_dev *dev, struct dai_data *dd, struct dma_status *stat) { - int ret = sof_dma_get_status(dd->chan->dma, dd->chan->index, stat); + int ret = sof_dma_get_status(dd->dma, dd->chan_index, stat); #if CONFIG_XRUN_NOTIFICATIONS_ENABLE if (ret == -EPIPE && !dd->xrun_notification_sent) { dd->xrun_notification_sent = send_copier_gateway_xrun_notif_msg @@ -1579,7 +1619,7 @@ int dai_zephyr_multi_endpoint_copy(struct dai_data **dd, struct comp_dev *dev, #endif for (i = 0; i < num_endpoints; i++) { - ret = sof_dma_reload(dd[i]->chan->dma, dd[i]->chan->index, 0); + ret = sof_dma_reload(dd[i]->dma, dd[i]->chan_index, 0); if (ret < 0) { dai_report_reload_xrun(dd[i], dev, 0); return ret; @@ -1605,10 +1645,10 @@ int dai_zephyr_multi_endpoint_copy(struct dai_data **dd, struct comp_dev *dev, status = dai_dma_multi_endpoint_cb(dd[i], dev, frames, multi_endpoint_buffer); if (status == SOF_DMA_CB_STATUS_END) - sof_dma_stop(dd[i]->chan->dma, dd[i]->chan->index); + sof_dma_stop(dd[i]->dma, dd[i]->chan_index); copy_bytes = frames * audio_stream_frame_bytes(&dd[i]->dma_buffer->stream); - ret = sof_dma_reload(dd[i]->chan->dma, dd[i]->chan->index, copy_bytes); + ret = sof_dma_reload(dd[i]->dma, dd[i]->chan_index, copy_bytes); if (ret < 0) { dai_report_reload_xrun(dd[i], dev, copy_bytes); return ret; @@ -1797,7 +1837,7 @@ int dai_common_copy(struct dai_data *dd, struct comp_dev *dev, pcm_converter_fun comp_warn(dev, "nothing to copy, src_frames: %u, sink_frames: %u", src_frames, sink_frames); #endif - sof_dma_reload(dd->chan->dma, dd->chan->index, 0); + sof_dma_reload(dd->dma, dd->chan_index, 0); return 0; } @@ -1807,9 +1847,9 @@ int dai_common_copy(struct dai_data *dd, struct comp_dev *dev, pcm_converter_fun comp_warn(dev, "dai trigger copy failed"); if (dai_dma_cb(dd, dev, copy_bytes, converter) == SOF_DMA_CB_STATUS_END) - sof_dma_stop(dd->chan->dma, dd->chan->index); + sof_dma_stop(dd->dma, 
dd->chan_index); - ret = sof_dma_reload(dd->chan->dma, dd->chan->index, copy_bytes); + ret = sof_dma_reload(dd->dma, dd->chan_index, copy_bytes); if (ret < 0) { dai_report_reload_xrun(dd, dev, copy_bytes); return ret; @@ -1847,7 +1887,7 @@ int dai_common_ts_config_op(struct dai_data *dd, struct comp_dev *dev) struct dai_ts_cfg *cfg = &dd->ts_config; comp_dbg(dev, "dai_ts_config()"); - if (!dd->chan) { + if (dd->chan_index == -1) { comp_err(dev, "No DMA channel information"); return -EINVAL; } @@ -1870,7 +1910,7 @@ int dai_common_ts_config_op(struct dai_data *dd, struct comp_dev *dev) cfg->direction = dai->direction; cfg->index = dd->dai->index; cfg->dma_id = dd->dma->plat_data.id; - cfg->dma_chan_index = dd->chan->index; + cfg->dma_chan_index = dd->chan_index; cfg->dma_chan_count = dd->dma->plat_data.channels; return dai_ts_config(dd->dai->dev, cfg); @@ -1928,17 +1968,18 @@ static int dai_ts_stop_op(struct comp_dev *dev) uint32_t dai_get_init_delay_ms(struct dai *dai) { - const struct dai_properties *props; - k_spinlock_key_t key; - uint32_t init_delay; + struct dai_properties props; + uint32_t init_delay = 0; + int ret; if (!dai) return 0; - key = k_spin_lock(&dai->lock); - props = dai_get_properties(dai->dev, 0, 0); - init_delay = props->reg_init_delay; - k_spin_unlock(&dai->lock, key); + k_mutex_lock(dai->lock, K_FOREVER); + ret = dai_get_properties_copy(dai->dev, 0, 0, &props); + if (!ret) + init_delay = props.reg_init_delay; + k_mutex_unlock(dai->lock); return init_delay; } diff --git a/src/audio/host-legacy.c b/src/audio/host-legacy.c index a16b3f74e1c3..36c259b7f23c 100644 --- a/src/audio/host-legacy.c +++ b/src/audio/host-legacy.c @@ -440,7 +440,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, uint32 elem_array = &hd->local.elem_array; /* config buffer will be used as proxy */ - err = dma_sg_alloc(&hd->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &hd->config.elem_array, SOF_MEM_FLAG_USER, dir, 1, 0, 0, 0); 
if (err < 0) { comp_err(dev, "dma_sg_alloc() failed"); @@ -450,7 +450,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, uint32 elem_array = &hd->config.elem_array; } - err = dma_sg_alloc(elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, + err = dma_sg_alloc(NULL, elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, buffer_bytes, (uintptr_t)(audio_stream_get_addr(&hd->dma_buffer->stream)), 0); if (err < 0) { @@ -602,7 +602,7 @@ void host_common_free(struct host_data *hd) dma_put(hd->dma); ipc_msg_free(hd->msg); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(NULL, &hd->config.elem_array); } static void host_free(struct comp_dev *dev) @@ -905,9 +905,9 @@ void host_common_reset(struct host_data *hd, uint16_t state) } /* free all DMA elements */ - dma_sg_free(&hd->host.elem_array); - dma_sg_free(&hd->local.elem_array); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(NULL, &hd->host.elem_array); + dma_sg_free(NULL, &hd->local.elem_array); + dma_sg_free(NULL, &hd->config.elem_array); /* It's safe that cleaning out `hd->config` after `dma_sg_free` for config.elem_array */ memset(&hd->config, 0, sizeof(hd->config)); diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c index bdb5c5759274..7ae35d88a7a9 100644 --- a/src/audio/host-zephyr.c +++ b/src/audio/host-zephyr.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -83,7 +84,7 @@ static int host_dma_set_config_and_copy(struct host_data *hd, struct comp_dev *d local_elem->size = bytes; /* reconfigure transfer */ - ret = sof_dma_config(hd->chan->dma, hd->chan->index, &hd->z_config); + ret = sof_dma_config(hd->dma, hd->chan_index, &hd->z_config); if (ret < 0) { comp_err(dev, "dma_config() failed, ret = %d", ret); @@ -92,7 +93,7 @@ static int host_dma_set_config_and_copy(struct host_data *hd, struct comp_dev *d cb(dev, bytes); - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, bytes); + ret = sof_dma_reload(hd->dma, hd->chan_index, bytes); if 
(ret < 0) { comp_err(dev, "dma_copy() failed, ret = %d", ret); @@ -222,7 +223,7 @@ static int host_copy_one_shot(struct host_data *hd, struct comp_dev *dev, copy_c hd->z_config.head_block->block_size = local_elem->size; /* reconfigure transfer */ - ret = sof_dma_config(hd->chan->dma, hd->chan->index, &hd->z_config); + ret = sof_dma_config(hd->dma, hd->chan_index, &hd->z_config); if (ret < 0) { comp_err(dev, "dma_config() failed, ret = %u", ret); return ret; @@ -230,7 +231,7 @@ static int host_copy_one_shot(struct host_data *hd, struct comp_dev *dev, copy_c cb(dev, copy_bytes); - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, copy_bytes); + ret = sof_dma_reload(hd->dma, hd->chan_index, copy_bytes); if (ret < 0) comp_err(dev, "dma_copy() failed, ret = %u", ret); @@ -364,7 +365,7 @@ static void host_dma_cb(struct comp_dev *dev, size_t bytes) /* get status from dma and check for xrun */ static int host_get_status(struct comp_dev *dev, struct host_data *hd, struct dma_status *stat) { - int ret = sof_dma_get_status(hd->chan->dma, hd->chan->index, stat); + int ret = sof_dma_get_status(hd->dma, hd->chan_index, stat); #if CONFIG_XRUN_NOTIFICATIONS_ENABLE if (ret == -EPIPE && !hd->xrun_notification_sent) { hd->xrun_notification_sent = send_copier_gateway_xrun_notif_msg @@ -551,7 +552,7 @@ static int host_copy_normal(struct host_data *hd, struct comp_dev *dev, copy_cal if (!copy_bytes) { if (hd->partial_size != 0) { if (stream_sync(hd, dev)) { - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, + ret = sof_dma_reload(hd->dma, hd->chan_index, hd->partial_size); if (ret < 0) comp_err(dev, "dma_reload() failed, ret = %u", ret); @@ -578,7 +579,7 @@ static int host_copy_normal(struct host_data *hd, struct comp_dev *dev, copy_cal hd->dma_buffer_size - hd->partial_size <= (2 + threshold) * hd->period_bytes) { if (stream_sync(hd, dev)) { - ret = sof_dma_reload(hd->chan->dma, hd->chan->index, + ret = sof_dma_reload(hd->dma, hd->chan_index, hd->partial_size); if (ret < 0) 
comp_err(dev, "dma_reload() failed, ret = %u", ret); @@ -605,7 +606,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, elem_array = &hd->local.elem_array; /* config buffer will be used as proxy */ - err = dma_sg_alloc(&hd->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(hd->heap, &hd->config.elem_array, SOF_MEM_FLAG_USER, dir, 1, 0, 0, 0); if (err < 0) { comp_err(dev, "dma_sg_alloc() failed"); @@ -615,7 +616,7 @@ static int create_local_elems(struct host_data *hd, struct comp_dev *dev, elem_array = &hd->config.elem_array; } - err = dma_sg_alloc(elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, + err = dma_sg_alloc(hd->heap, elem_array, SOF_MEM_FLAG_USER, dir, buffer_count, buffer_bytes, (uintptr_t)audio_stream_get_addr(&hd->dma_buffer->stream), 0); if (err < 0) { @@ -646,7 +647,7 @@ int host_common_trigger(struct host_data *hd, struct comp_dev *dev, int cmd) if (cmd != COMP_TRIGGER_START && hd->copy_type == COMP_COPY_ONE_SHOT) return ret; - if (!hd->chan) { + if (hd->chan_index == -1) { comp_err(dev, "no dma channel configured"); return -EINVAL; } @@ -654,14 +655,14 @@ int host_common_trigger(struct host_data *hd, struct comp_dev *dev, int cmd) switch (cmd) { case COMP_TRIGGER_START: hd->partial_size = 0; - ret = sof_dma_start(hd->chan->dma, hd->chan->index); + ret = sof_dma_start(hd->dma, hd->chan_index); if (ret < 0) comp_err(dev, "dma_start() failed, ret = %u", ret); break; case COMP_TRIGGER_STOP: case COMP_TRIGGER_XRUN: - ret = sof_dma_stop(hd->chan->dma, hd->chan->index); + ret = sof_dma_stop(hd->dma, hd->chan_index); if (ret < 0) comp_err(dev, "dma stop failed: %d", ret); @@ -721,9 +722,18 @@ __cold int host_common_new(struct host_data *hd, struct comp_dev *dev, sof_dma_put(hd->dma); return -ENOMEM; } - hd->chan = NULL; + hd->chan_index = -1; hd->copy_type = COMP_COPY_NORMAL; +#ifdef CONFIG_SOF_USERSPACE_LL + /* + * copier_host_create() uses mod_zalloc() to allocate + * the 'hd' host data object and does not set 
hd->heap. + * If LL is run in user-space, assign the 'heap' here. + */ + hd->heap = zephyr_ll_user_heap(); +#endif + return 0; } @@ -734,6 +744,7 @@ __cold static struct comp_dev *host_new(const struct comp_driver *drv, struct comp_dev *dev; struct host_data *hd; const struct ipc_config_host *ipc_host = spec; + struct k_heap *heap = NULL; int ret; assert_can_be_cold(); @@ -745,10 +756,17 @@ __cold static struct comp_dev *host_new(const struct comp_driver *drv, return NULL; dev->ipc_config = *config; - hd = rzalloc(SOF_MEM_FLAG_USER, sizeof(*hd)); +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif + + hd = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, sizeof(*hd), 0); if (!hd) goto e_data; + memset(hd, 0, sizeof(*hd)); + hd->heap = heap; + hd->nobytes_last_logged = k_uptime_get(); comp_set_drvdata(dev, hd); @@ -761,7 +779,7 @@ __cold static struct comp_dev *host_new(const struct comp_driver *drv, return dev; e_dev: - rfree(hd); + sof_heap_free(heap, hd); e_data: comp_free_device(dev); return NULL; @@ -774,7 +792,7 @@ __cold void host_common_free(struct host_data *hd) sof_dma_put(hd->dma); ipc_msg_free(hd->msg); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(hd->heap, &hd->config.elem_array); } __cold static void host_free(struct comp_dev *dev) @@ -785,7 +803,7 @@ __cold static void host_free(struct comp_dev *dev) comp_dbg(dev, "entry"); host_common_free(hd); - rfree(hd); + sof_heap_free(hd->heap, hd); comp_free_device(dev); } @@ -852,7 +870,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, uint32_t buffer_size_preferred; uint32_t addr_align; uint32_t align; - int i, channel, err; + int i, err; bool is_scheduling_source = dev == dev->pipeline->sched_comp; uint32_t round_up_size; @@ -943,7 +961,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, } } else { /* allocate not shared buffer */ - hd->dma_buffer = buffer_alloc_range(NULL, buffer_size_preferred, buffer_size, + hd->dma_buffer = 
buffer_alloc_range(hd->heap, buffer_size_preferred, buffer_size, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_DMA, addr_align, BUFFER_USAGE_NOT_SHARED); if (!hd->dma_buffer) { @@ -988,26 +1006,20 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, /* get DMA channel from DMAC * note: stream_tag is ignored by dw-dma */ - channel = sof_dma_request_channel(hd->dma, hda_chan); - if (channel < 0) { + hd->chan_index = sof_dma_request_channel(hd->dma, hda_chan); + if (hd->chan_index < 0) { comp_err(dev, "requested channel %d is busy", hda_chan); return -ENODEV; } - hd->chan = &hd->dma->chan[channel]; uint32_t buffer_addr = 0; uint32_t buffer_bytes = 0; uint32_t addr; - hd->chan->direction = config->direction; - hd->chan->desc_count = config->elem_array.count; - hd->chan->is_scheduling_source = config->is_scheduling_source; - hd->chan->period = config->period; - memset(dma_cfg, 0, sizeof(*dma_cfg)); - dma_block_cfg = rzalloc(SOF_MEM_FLAG_USER, - sizeof(*dma_block_cfg)); + dma_block_cfg = sof_heap_alloc(hd->heap, SOF_MEM_FLAG_USER, + sizeof(*dma_block_cfg), 0); if (!dma_block_cfg) { comp_err(dev, "dma_block_config allocation failed"); @@ -1015,6 +1027,8 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, goto err_release_channel; } + memset(dma_block_cfg, 0, sizeof(*dma_block_cfg)); + dma_cfg->block_count = 1; dma_cfg->source_data_size = config->src_width; dma_cfg->dest_data_size = config->dest_width; @@ -1050,7 +1064,7 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, break; } - err = sof_dma_config(hd->chan->dma, hd->chan->index, dma_cfg); + err = sof_dma_config(hd->dma, hd->chan_index, dma_cfg); if (err < 0) { comp_err(dev, "dma_config() failed"); goto err_free_block_cfg; @@ -1078,10 +1092,10 @@ int host_common_params(struct host_data *hd, struct comp_dev *dev, err_free_block_cfg: dma_cfg->head_block = NULL; - rfree(dma_block_cfg); + sof_heap_free(hd->heap, dma_block_cfg); err_release_channel: - sof_dma_release_channel(hd->dma, 
hd->chan->index); - hd->chan = NULL; + sof_dma_release_channel(hd->dma, hd->chan_index); + hd->chan_index = -1; return err; } @@ -1139,16 +1153,16 @@ static int host_position(struct comp_dev *dev, void host_common_reset(struct host_data *hd, uint16_t state) { - if (hd->chan) { - sof_dma_stop(hd->chan->dma, hd->chan->index); - sof_dma_release_channel(hd->dma, hd->chan->index); - hd->chan = NULL; + if (hd->chan_index != -1) { + sof_dma_stop(hd->dma, hd->chan_index); + sof_dma_release_channel(hd->dma, hd->chan_index); + hd->chan_index = -1; } /* free all DMA elements */ - dma_sg_free(&hd->host.elem_array); - dma_sg_free(&hd->local.elem_array); - dma_sg_free(&hd->config.elem_array); + dma_sg_free(hd->heap, &hd->host.elem_array); + dma_sg_free(hd->heap, &hd->local.elem_array); + dma_sg_free(hd->heap, &hd->config.elem_array); /* free DMA buffer */ if (hd->dma_buffer) { @@ -1158,7 +1172,7 @@ void host_common_reset(struct host_data *hd, uint16_t state) /* free DMA block configuration */ if (hd->z_config.head_block) - rfree(hd->z_config.head_block); + sof_heap_free(hd->heap, hd->z_config.head_block); /* reset buffer pointers */ hd->local_pos = 0; diff --git a/src/audio/mixin_mixout/mixin_mixout.c b/src/audio/mixin_mixout/mixin_mixout.c index 443c79dfad21..81dfb20e143e 100644 --- a/src/audio/mixin_mixout/mixin_mixout.c +++ b/src/audio/mixin_mixout/mixin_mixout.c @@ -707,7 +707,9 @@ static int mixin_prepare(struct processing_module *mod, int ret; comp_info(dev, "entry"); +#if CONFIG_XRUN_NOTIFICATIONS_ENABLE md->eos_delay_configured = false; +#endif ret = mixin_params(mod); if (ret < 0) diff --git a/src/audio/module_adapter/module_adapter.c b/src/audio/module_adapter/module_adapter.c index 9ccbcf33dca5..f30a1ba0969c 100644 --- a/src/audio/module_adapter/module_adapter.c +++ b/src/audio/module_adapter/module_adapter.c @@ -111,7 +111,12 @@ static struct processing_module *module_adapter_mem_alloc(const struct comp_driv } mod_heap = &mod_heap_user->heap; } else { +#ifdef 
 CONFIG_SOF_USERSPACE_LL + mod_heap = zephyr_ll_user_heap(); + comp_cl_dbg(drv, "using ll user heap for module"); +#else mod_heap = drv->user_heap; +#endif mod_heap_user = NULL; heap_size = 0; } @@ -172,14 +177,17 @@ static void module_adapter_mem_free(struct processing_module *mod) sof_heap_free(mod_heap, mod->priv.cfg.input_pins); #endif sof_heap_free(mod_heap, mod->dev); - sof_heap_free(mod_heap, mod); - if (domain == COMP_PROCESSING_DOMAIN_DP) { - struct dp_heap_user *mod_heap_user = container_of(mod_heap, struct dp_heap_user, - heap); - - if (mod_heap && !--mod_heap_user->client_count) - rfree(mod_heap_user); + +#ifdef CONFIG_SOF_USERSPACE_LL + if (domain != COMP_PROCESSING_DOMAIN_DP) { + mod_heap = zephyr_ll_user_heap(); + comp_cl_dbg(drv, "using ll user heap for module free"); } +#endif + comp_cl_dbg(drv, "free mod %p with heap %p", mod, mod_heap); + sof_heap_free(mod_heap, mod); + if (domain == COMP_PROCESSING_DOMAIN_DP && mod_heap) + dp_heap_put(mod_heap); } /* @@ -508,11 +516,13 @@ int module_adapter_prepare(struct comp_dev *dev) /* allocate memory for input buffers */ if (mod->max_sources) { mod->input_buffers = - rzalloc(memory_flags, sizeof(*mod->input_buffers) * mod->max_sources); + sof_heap_alloc(sof_sys_user_heap_get(), memory_flags, + sizeof(*mod->input_buffers) * mod->max_sources, 0); if (!mod->input_buffers) { comp_err(dev, "failed to allocate input buffers"); return -ENOMEM; } + memset(mod->input_buffers, 0, sizeof(*mod->input_buffers) * mod->max_sources); } else { mod->input_buffers = NULL; } @@ -520,12 +530,14 @@ int module_adapter_prepare(struct comp_dev *dev) /* allocate memory for output buffers */ if (mod->max_sinks) { mod->output_buffers = - rzalloc(memory_flags, sizeof(*mod->output_buffers) * mod->max_sinks); + sof_heap_alloc(sof_sys_user_heap_get(), memory_flags, + sizeof(*mod->output_buffers) * mod->max_sinks, 0); if (!mod->output_buffers) { comp_err(dev, "failed to allocate output buffers"); ret = -ENOMEM; goto 
 in_out_free; } + memset(mod->output_buffers, 0, sizeof(*mod->output_buffers) * mod->max_sinks); } else { mod->output_buffers = NULL; } @@ -586,7 +598,8 @@ int module_adapter_prepare(struct comp_dev *dev) size_t size = MAX(mod->deep_buff_bytes, mod->period_bytes); list_for_item(blist, &dev->bsource_list) { - mod->input_buffers[i].data = rballoc(memory_flags, size); + mod->input_buffers[i].data = sof_heap_alloc(sof_sys_user_heap_get(), + memory_flags, size, 0); if (!mod->input_buffers[i].data) { comp_err(mod->dev, "Failed to alloc input buffer data"); ret = -ENOMEM; @@ -598,7 +611,9 @@ int module_adapter_prepare(struct comp_dev *dev) /* allocate memory for output buffer data */ i = 0; list_for_item(blist, &dev->bsink_list) { - mod->output_buffers[i].data = rballoc(memory_flags, md->mpd.out_buff_size); + mod->output_buffers[i].data = sof_heap_alloc(sof_sys_user_heap_get(), + memory_flags, + md->mpd.out_buff_size, 0); if (!mod->output_buffers[i].data) { comp_err(mod->dev, "Failed to alloc output buffer data"); ret = -ENOMEM; @@ -623,7 +638,8 @@ int module_adapter_prepare(struct comp_dev *dev) goto free; } - if (md->resources.heap && md->resources.heap != dev->drv->user_heap) { + if (dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP && + md->resources.heap) { struct dp_heap_user *dp_user = container_of(md->resources.heap, struct dp_heap_user, heap); @@ -669,20 +685,23 @@ int module_adapter_prepare(struct comp_dev *dev) list_item_del(&buffer->buffers_list); irq_local_enable(flags); buffer_free(buffer); + if (dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP && + md->resources.heap) + dp_heap_put(md->resources.heap); } out_data_free: for (i = 0; i < mod->num_of_sinks; i++) - rfree(mod->output_buffers[i].data); + sof_heap_free(sof_sys_user_heap_get(), mod->output_buffers[i].data); in_data_free: for (i = 0; i < mod->num_of_sources; i++) - rfree(mod->input_buffers[i].data); + sof_heap_free(sof_sys_user_heap_get(), mod->input_buffers[i].data); in_out_free: - 
rfree(mod->output_buffers); + sof_heap_free(sof_sys_user_heap_get(), mod->output_buffers); mod->output_buffers = NULL; - rfree(mod->input_buffers); + sof_heap_free(sof_sys_user_heap_get(), mod->input_buffers); mod->input_buffers = NULL; return ret; } @@ -1394,14 +1413,16 @@ int module_adapter_reset(struct comp_dev *dev) if (IS_PROCESSING_MODE_RAW_DATA(mod)) { for (i = 0; i < mod->num_of_sinks; i++) - rfree((__sparse_force void *)mod->output_buffers[i].data); + sof_heap_free(sof_sys_user_heap_get(), + (__sparse_force void *)mod->output_buffers[i].data); for (i = 0; i < mod->num_of_sources; i++) - rfree((__sparse_force void *)mod->input_buffers[i].data); + sof_heap_free(sof_sys_user_heap_get(), + (__sparse_force void *)mod->input_buffers[i].data); } if (IS_PROCESSING_MODE_RAW_DATA(mod) || IS_PROCESSING_MODE_AUDIO_STREAM(mod)) { - rfree(mod->output_buffers); - rfree(mod->input_buffers); + sof_heap_free(sof_sys_user_heap_get(), mod->output_buffers); + sof_heap_free(sof_sys_user_heap_get(), mod->input_buffers); mod->num_of_sources = 0; mod->num_of_sinks = 0; @@ -1459,6 +1480,9 @@ void module_adapter_free(struct comp_dev *dev) list_item_del(&buffer->buffers_list); irq_local_enable(flags); buffer_free(buffer); + if (dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP && + mod->priv.resources.heap) + dp_heap_put(mod->priv.resources.heap); } mod_free(mod, mod->stream_params); diff --git a/src/audio/pcm_converter/pcm_converter_generic.c b/src/audio/pcm_converter/pcm_converter_generic.c index fe96e6d1f124..4938be438dbc 100644 --- a/src/audio/pcm_converter/pcm_converter_generic.c +++ b/src/audio/pcm_converter/pcm_converter_generic.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -669,7 +670,7 @@ static int pcm_convert_f_to_s32(const struct audio_stream *source, } #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_S32LE */ -const struct pcm_func_map pcm_func_map[] = { +APP_TASK_DATA const struct pcm_func_map 
pcm_func_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_U8 { SOF_IPC_FRAME_U8, SOF_IPC_FRAME_U8, just_copy }, #endif /* CONFIG_PCM_CONVERTER_FORMAT_U8 */ @@ -732,7 +733,7 @@ const struct pcm_func_map pcm_func_map[] = { #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_S32LE */ }; -const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); +APP_TASK_DATA const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 static int pcm_convert_s16_c16_to_s16_c32(const struct audio_stream *source, @@ -1020,7 +1021,7 @@ static int pcm_convert_s24_c32_to_s24_c24_link_gtw(const struct audio_stream *so #endif -const struct pcm_func_vc_map pcm_func_vc_map[] = { +APP_TASK_DATA const struct pcm_func_vc_map pcm_func_vc_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S32_LE, SOF_IPC_FRAME_S16_LE, pcm_convert_s16_c16_to_s16_c32 }, @@ -1101,6 +1102,6 @@ const struct pcm_func_vc_map pcm_func_vc_map[] = { #endif }; -const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); +APP_TASK_DATA const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); #endif diff --git a/src/audio/pcm_converter/pcm_converter_hifi3.c b/src/audio/pcm_converter/pcm_converter_hifi3.c index 2b6ca607415d..7c75e326b1c0 100644 --- a/src/audio/pcm_converter/pcm_converter_hifi3.c +++ b/src/audio/pcm_converter/pcm_converter_hifi3.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -764,7 +765,7 @@ static int pcm_convert_f_to_s32(const struct audio_stream *source, #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_32LE */ #endif /* XCHAL_HAVE_FP */ -const struct pcm_func_map pcm_func_map[] = { +APP_TASK_DATA const struct pcm_func_map pcm_func_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_S16LE { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, just_copy }, #endif /* CONFIG_PCM_CONVERTER_FORMAT_S16LE */ @@ -807,7 +808,7 @@ const struct 
pcm_func_map pcm_func_map[] = { #endif /* CONFIG_PCM_CONVERTER_FORMAT_FLOAT && CONFIG_PCM_CONVERTER_FORMAT_S32LE */ #endif /* XCHAL_HAVE_FP */ }; -const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); +APP_TASK_DATA const size_t pcm_func_count = ARRAY_SIZE(pcm_func_map); #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 static int pcm_convert_s16_c16_to_s16_c32(const struct audio_stream *source, @@ -1206,7 +1207,7 @@ static int pcm_convert_s24_c32_to_s24_c24(const struct audio_stream *source, */ #endif -const struct pcm_func_vc_map pcm_func_vc_map[] = { +APP_TASK_DATA const struct pcm_func_vc_map pcm_func_vc_map[] = { #if CONFIG_PCM_CONVERTER_FORMAT_S16_C16_AND_S16_C32 { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S32_LE, SOF_IPC_FRAME_S16_LE, pcm_convert_s16_c16_to_s16_c32 }, @@ -1283,6 +1284,6 @@ const struct pcm_func_vc_map pcm_func_vc_map[] = { #endif }; -const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); +APP_TASK_DATA const size_t pcm_func_vc_count = ARRAY_SIZE(pcm_func_vc_map); #endif diff --git a/src/audio/pcm_converter/pcm_remap.c b/src/audio/pcm_converter/pcm_remap.c index 9204b21ee8ab..4ae300e195c9 100644 --- a/src/audio/pcm_converter/pcm_remap.c +++ b/src/audio/pcm_converter/pcm_remap.c @@ -5,6 +5,7 @@ #include #include +#include static void mute_channel_c16(struct audio_stream *stream, int channel, int frames) { @@ -423,7 +424,7 @@ static int remap_c16_to_c32_no_shift(const struct audio_stream *source, uint32_t /* Unfortunately, all these nice "if"s were commented out to suppress * CI "defined but not used" warnings. 
*/ -const struct pcm_func_map pcm_remap_func_map[] = { +APP_TASK_DATA const struct pcm_func_map pcm_remap_func_map[] = { /* #if CONFIG_PCM_CONVERTER_FORMAT_S16LE */ { SOF_IPC_FRAME_S16_LE, SOF_IPC_FRAME_S16_LE, remap_c16}, /* #endif */ @@ -474,4 +475,4 @@ const struct pcm_func_map pcm_remap_func_map[] = { /* #endif */ }; -const size_t pcm_remap_func_count = ARRAY_SIZE(pcm_remap_func_map); +APP_TASK_DATA const size_t pcm_remap_func_count = ARRAY_SIZE(pcm_remap_func_map); diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 89bb3574289b..60649a5f284f 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -138,19 +138,28 @@ struct pipeline *pipeline_new(struct k_heap *heap, uint32_t pipeline_id, uint32_ p->pipeline_id = pipeline_id; p->status = COMP_STATE_INIT; p->trigger.cmd = COMP_TRIGGER_NO_ACTION; + +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("pipeline trace settings cannot be copied"); +#else ret = memcpy_s(&p->tctx, sizeof(struct tr_ctx), &pipe_tr, sizeof(struct tr_ctx)); if (ret < 0) { pipe_err(p, "failed to copy trace settings"); goto free; } +#endif +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("pipeline position reporting not available"); +#else ret = pipeline_posn_offset_get(&p->posn_offset); if (ret < 0) { pipe_err(p, "pipeline_posn_offset_get failed %d", ret); goto free; } +#endif /* just for retrieving valid ipc_msg header */ ipc_build_stream_posn(&posn, SOF_IPC_STREAM_TRIG_XRUN, p->comp_id); @@ -178,24 +187,40 @@ static void buffer_set_comp(struct comp_buffer *buffer, struct comp_dev *comp, comp_buffer_set_sink_component(buffer, comp); } +#ifdef CONFIG_SOF_USERSPACE_LL +#define PPL_LOCK_DECLARE +#undef IPC_MOVE_TO_USER_NOT_COMPLETE +#ifdef IPC_MOVE_TO_USER_NOT_COMPLETE +#define PPL_LOCK(x) +#define PPL_UNLOCK(x) +#else +#define PPL_LOCK(x) k_mutex_lock(comp->list_mutex, K_FOREVER) +#define PPL_UNLOCK(x) k_mutex_unlock(comp->list_mutex) +#endif +#else +#define PPL_LOCK_DECLARE 
uint32_t flags +#define PPL_LOCK(x) irq_local_disable(flags) +#define PPL_UNLOCK(x) irq_local_enable(flags) +#endif + int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer, int dir) { struct list_item *comp_list; - uint32_t flags; + PPL_LOCK_DECLARE; if (dir == PPL_CONN_DIR_COMP_TO_BUFFER) comp_info(comp, "connect buffer %d as sink", buf_get_id(buffer)); else comp_info(comp, "connect buffer %d as source", buf_get_id(buffer)); - irq_local_disable(flags); + PPL_LOCK(); comp_list = comp_buffer_list(comp, dir); buffer_attach(buffer, comp_list, dir); buffer_set_comp(buffer, comp, dir); - irq_local_enable(flags); + PPL_UNLOCK(); return 0; } @@ -203,20 +228,20 @@ int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer, void pipeline_disconnect(struct comp_dev *comp, struct comp_buffer *buffer, int dir) { struct list_item *comp_list; - uint32_t flags; + PPL_LOCK_DECLARE; if (dir == PPL_CONN_DIR_COMP_TO_BUFFER) comp_dbg(comp, "disconnect buffer %d as sink", buf_get_id(buffer)); else comp_dbg(comp, "disconnect buffer %d as source", buf_get_id(buffer)); - irq_local_disable(flags); + PPL_LOCK(); comp_list = comp_buffer_list(comp, dir); buffer_detach(buffer, comp_list, dir); buffer_set_comp(buffer, NULL, dir); - irq_local_enable(flags); + PPL_UNLOCK(); } /* pipelines must be inactive */ @@ -487,6 +512,10 @@ struct comp_dev *pipeline_get_dai_comp(uint32_t pipeline_id, int dir) */ struct comp_dev *pipeline_get_dai_comp_latency(uint32_t pipeline_id, uint32_t *latency) { +#ifdef CONFIG_SOF_USERSPACE_LL + LOG_WRN("latency cannot be computed in user-space pipelines!"); + *latency = 0; +#else struct ipc_comp_dev *ipc_sink; struct ipc_comp_dev *ipc_source; struct comp_dev *source; @@ -554,7 +583,7 @@ struct comp_dev *pipeline_get_dai_comp_latency(uint32_t pipeline_id, uint32_t *l /* Get a next sink component */ ipc_sink = ipc_get_ppl_sink_comp(ipc, source->pipeline->pipeline_id); } - +#endif return NULL; } EXPORT_SYMBOL(pipeline_get_dai_comp_latency); 
diff --git a/src/audio/pipeline/pipeline-schedule.c b/src/audio/pipeline/pipeline-schedule.c index 45fd1eed639c..7e19b43963ff 100644 --- a/src/audio/pipeline/pipeline-schedule.c +++ b/src/audio/pipeline/pipeline-schedule.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -282,6 +283,16 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx, struct pipeline_data *ppl_data = ctx->comp_data; struct list_item *tlist; struct pipeline *p; + +#ifdef CONFIG_SOF_USERSPACE_LL + /* + * In user-space irq_local_disable() is not available. Use the LL + * scheduler mutex to prevent the scheduler from processing tasks + * while pipeline state is being updated. The k_mutex is re-entrant + * so schedule_task() calls inside the critical section are safe. + */ + zephyr_ll_lock_sched(); +#else uint32_t flags; /* @@ -290,6 +301,7 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx, * immediately before all pipelines achieved a consistent state. 
*/ irq_local_disable(flags); +#endif switch (cmd) { case COMP_TRIGGER_PAUSE: @@ -345,8 +357,11 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx, p->xrun_bytes = 1; } } - +#ifdef CONFIG_SOF_USERSPACE_LL + zephyr_ll_unlock_sched(); +#else irq_local_enable(flags); +#endif } int pipeline_comp_ll_task_init(struct pipeline *p) diff --git a/src/debug/debug_stream/debug_stream_thread_info.c b/src/debug/debug_stream/debug_stream_thread_info.c index 4b47df638158..158f1525ae0f 100644 --- a/src/debug/debug_stream/debug_stream_thread_info.c +++ b/src/debug/debug_stream/debug_stream_thread_info.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -225,6 +226,10 @@ static void thread_info_cb(const struct k_thread *cthread, void *user_data) tinfo->name, tinfo->stack_usage * 100U / 255, tinfo->cpu_usage * 100U / 255); + if (name && strncmp(name, "idle", 4) == 0) + LOG_INF("core %u utilization %u%%", ud->core, + 100U - tinfo->cpu_usage * 100U / 255); + ud->thread_count++; } diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index 91c09ef2e510..39926331ea78 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -33,6 +33,7 @@ #include struct comp_dev; +struct k_heap; /** \name Trace macros * @{ @@ -148,6 +149,17 @@ struct comp_buffer { /* list of buffers, to be used i.e. 
in raw data processing mode*/ struct list_item buffers_list; + + struct k_heap *heap; + +#if CONFIG_PROBE + /** probe produce callback, called on buffer produce */ + void (*probe_cb_produce)(void *arg, struct buffer_cb_transact *cb_data); + /** probe free callback, called on buffer free */ + void (*probe_cb_free)(void *arg); + /** opaque argument passed to probe callbacks */ + void *probe_cb_arg; +#endif }; /* diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index b6695b9cd312..a69be34b15a1 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -679,6 +680,10 @@ struct comp_dev { struct list_item bsource_list; /**< list of source buffers */ struct list_item bsink_list; /**< list of sink buffers */ +#ifdef CONFIG_SOF_USERSPACE_LL + struct k_mutex *list_mutex; /**< protect lists of source/sinks */ +#endif + /* performance data*/ struct comp_perf_data perf_data; /* Input Buffer Size for pin 0, add array for other pins if needed */ @@ -863,6 +868,10 @@ static inline void comp_init(const struct comp_driver *drv, dev->state = COMP_STATE_INIT; list_init(&dev->bsink_list); list_init(&dev->bsource_list); +#ifdef CONFIG_SOF_USERSPACE_LL + dev->list_mutex = k_object_alloc(K_OBJ_MUTEX); + k_mutex_init(dev->list_mutex); +#endif memcpy_s(&dev->tctx, sizeof(dev->tctx), trace_comp_drv_get_tr_ctx(dev->drv), sizeof(struct tr_ctx)); } @@ -1228,6 +1237,16 @@ void comp_init_performance_data(struct comp_dev *dev); */ bool comp_update_performance_data(struct comp_dev *dev, uint32_t cycles_used); +/** + * Grant access to component to a thread. + * + * Must be called from kernel context. + * + * @param dev Component to update. 
+ * @param th thread to give access to + */ +void comp_grant_access_to_thread(const struct comp_dev *dev, struct k_thread *th); + static inline int user_get_buffer_memory_region(const struct comp_driver *drv) { #if CONFIG_SOF_USERSPACE_USE_DRIVER_HEAP diff --git a/src/include/sof/audio/component_ext.h b/src/include/sof/audio/component_ext.h index d2bbf87a7764..d0afe4be3ad8 100644 --- a/src/include/sof/audio/component_ext.h +++ b/src/include/sof/audio/component_ext.h @@ -53,6 +53,10 @@ static inline void comp_free(struct comp_dev *dev) * be freed after this. */ drv->ops.free(dev); + +#ifdef CONFIG_SOF_USERSPACE_LL + k_object_free(dev->list_mutex); +#endif } /** diff --git a/src/include/sof/coherent.h b/src/include/sof/coherent.h index 172e45b4ed92..ba6b8d8c7e52 100644 --- a/src/include/sof/coherent.h +++ b/src/include/sof/coherent.h @@ -86,8 +86,8 @@ STATIC_ASSERT(sizeof(struct coherent) <= DCACHE_LINE_SIZE, DCACHE_LINE_SIZE_too #define ADDR_IS_COHERENT(_c) #endif -/* debug sharing amongst cores */ -#ifdef COHERENT_CHECK_NONSHARED_CORES +/* debug sharing amongst cores - not available in user-space builds */ +#if defined(COHERENT_CHECK_NONSHARED_CORES) && !defined(CONFIG_SOF_USERSPACE_LL) #define CORE_CHECK_STRUCT_FIELD uint32_t __core; bool __is_shared #define CORE_CHECK_STRUCT_INIT(_c, is_shared) { (_c)->__core = cpu_get_id(); \ diff --git a/src/include/sof/ipc/common.h b/src/include/sof/ipc/common.h index e46fc10b9521..6faca4ca8413 100644 --- a/src/include/sof/ipc/common.h +++ b/src/include/sof/ipc/common.h @@ -53,6 +53,20 @@ extern struct tr_ctx ipc_tr; #define IPC_TASK_SECONDARY_CORE BIT(2) #define IPC_TASK_POWERDOWN BIT(3) +struct ipc_user { + struct k_thread *thread; + struct k_sem *sem; + struct k_event *event; + /** @brief Copy of IPC4 message primary word forwarded to user thread */ + uint32_t ipc_msg_pri; + /** @brief Copy of IPC4 message extension word forwarded to user thread */ + uint32_t ipc_msg_ext; + /** @brief Result code from user thread processing 
*/ + int result; + struct ipc *ipc; + struct k_thread *audio_thread; +}; + struct ipc { struct k_spinlock lock; /* locking mechanism */ void *comp_data; @@ -74,6 +88,10 @@ struct ipc { struct task ipc_task; #endif +#ifdef CONFIG_SOF_USERSPACE_LL + struct ipc_user *ipc_user_pdata; +#endif + #ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS /* io performance measurement */ struct io_perf_data_item *io_perf_in_msg_count; @@ -95,6 +113,12 @@ struct ipc { extern struct task_ops ipc_task_ops; +#ifdef CONFIG_SOF_USERSPACE_LL + +struct ipc *ipc_get(void); + +#else + /** * \brief Get the IPC global context. * @return The global IPC context. @@ -104,6 +128,8 @@ static inline struct ipc *ipc_get(void) return sof_get()->ipc; } +#endif /* CONFIG_SOF_USERSPACE_LL */ + /** * \brief Initialise global IPC context. * @param[in,out] sof Global SOF context. @@ -240,7 +266,7 @@ int ipc_process_on_core(uint32_t core, bool blocking); * \brief reply to an IPC message. * @param[in] reply pointer to the reply structure. */ -void ipc_msg_reply(struct sof_ipc_reply *reply); +#include /** * \brief Call platform-specific IPC completion function. @@ -250,4 +276,14 @@ void ipc_complete_cmd(struct ipc *ipc); /* GDB stub: should enter GDB after completing the IPC processing */ extern bool ipc_enter_gdb; +#ifdef CONFIG_SOF_USERSPACE_LL +struct ipc4_message_request; +/** + * @brief Forward an IPC4 command to the user-space thread. + * @param ipc4 Pointer to the IPC4 message request + * @return Result from user thread processing + */ +int ipc_user_forward_cmd(struct ipc4_message_request *ipc4); +#endif + #endif /* __SOF_DRIVERS_IPC_H__ */ diff --git a/src/include/sof/ipc/ipc_reply.h b/src/include/sof/ipc/ipc_reply.h new file mode 100644 index 000000000000..756fd535ca30 --- /dev/null +++ b/src/include/sof/ipc/ipc_reply.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2026 Intel Corporation. All rights reserved. 
+ */ + +#ifndef __SOF_IPC_IPC_REPLY_H__ +#define __SOF_IPC_IPC_REPLY_H__ + +#include + +struct sof_ipc_reply; + +/** + * \brief reply to an IPC message. + * @param[in] reply pointer to the reply structure. + */ +#if defined(__ZEPHYR__) && defined(CONFIG_SOF_USERSPACE_LL) +__syscall void ipc_msg_reply(struct sof_ipc_reply *reply); +#else +void z_impl_ipc_msg_reply(struct sof_ipc_reply *reply); +#define ipc_msg_reply z_impl_ipc_msg_reply +#endif + +#if defined(__ZEPHYR__) && defined(CONFIG_SOF_USERSPACE_LL) +#include +#endif + +#endif /* __SOF_IPC_IPC_REPLY_H__ */ diff --git a/src/include/sof/lib/dai-zephyr.h b/src/include/sof/lib/dai-zephyr.h index 8ff739fa42d5..76e9de96c325 100644 --- a/src/include/sof/lib/dai-zephyr.h +++ b/src/include/sof/lib/dai-zephyr.h @@ -52,7 +52,10 @@ struct dai { uint32_t dma_dev; const struct device *dev; const struct dai_data *dd; - struct k_spinlock lock; /* protect properties */ + struct k_mutex *lock; /* protect properties */ +#ifndef CONFIG_SOF_USERSPACE_LL + struct k_mutex lock_obj; +#endif }; union hdalink_cfg { @@ -117,7 +120,7 @@ typedef int (*channel_copy_func)(const struct audio_stream *src, unsigned int sr */ struct dai_data { /* local DMA config */ - struct dma_chan_data *chan; + int chan_index; uint32_t stream_id; struct dma_sg_config config; struct dma_config *z_config; @@ -168,6 +171,7 @@ struct dai_data { #endif /* Copier gain params */ struct copier_gain_params *gain_data; + struct k_heap *heap; }; /* these 3 are here to satisfy clk.c and ssp.h interconnection, will be removed leter */ diff --git a/src/include/sof/lib/notifier.h b/src/include/sof/lib/notifier.h index 060906655cb8..87ca2cd40265 100644 --- a/src/include/sof/lib/notifier.h +++ b/src/include/sof/lib/notifier.h @@ -27,9 +27,6 @@ enum notify_id { NOTIFIER_ID_SSP_FREQ, /* struct clock_notify_data * */ NOTIFIER_ID_KPB_CLIENT_EVT, /* struct kpb_event_data * */ NOTIFIER_ID_DMA_DOMAIN_CHANGE, /* struct dma_chan_data * */ - NOTIFIER_ID_BUFFER_PRODUCE, /* struct 
buffer_cb_transact* */ - NOTIFIER_ID_BUFFER_CONSUME, /* struct buffer_cb_transact* */ - NOTIFIER_ID_BUFFER_FREE, /* struct buffer_cb_free* */ NOTIFIER_ID_DMA_COPY, /* struct dma_cb_data* */ NOTIFIER_ID_LL_POST_RUN, /* NULL */ NOTIFIER_ID_DMA_IRQ, /* struct dma_chan_data * */ diff --git a/src/include/sof/schedule/dp_schedule.h b/src/include/sof/schedule/dp_schedule.h index 37b8f1fc3f2c..9a63d503329b 100644 --- a/src/include/sof/schedule/dp_schedule.h +++ b/src/include/sof/schedule/dp_schedule.h @@ -8,6 +8,7 @@ #ifndef __SOF_SCHEDULE_DP_SCHEDULE_H__ #define __SOF_SCHEDULE_DP_SCHEDULE_H__ +#include #include #include #include @@ -125,6 +126,22 @@ struct dp_heap_user { unsigned int client_count; /* devices and buffers */ }; +/** + * dp_heap_put() - Release a reference to a DP module heap. + * @heap: The k_heap pointer belonging to a dp_heap_user. + * + * Decrements client_count and frees the dp_heap_user when it reaches zero. + * Must only be called for heaps that are part of a dp_heap_user, i.e. heaps + * allocated by module_adapter_dp_heap_new() for DP domain modules. 
+ */ +static inline void dp_heap_put(struct k_heap *heap) +{ + struct dp_heap_user *mod_heap_user = container_of(heap, struct dp_heap_user, heap); + + if (!--mod_heap_user->client_count) + rfree(mod_heap_user); +} + #if CONFIG_ZEPHYR_DP_SCHEDULER int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd, const union scheduler_dp_thread_ipc_param *param); diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index dc28e43c7461..639dcb5caea6 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -43,6 +43,20 @@ struct ll_schedule_domain_ops { void (*handler)(void *arg), void *arg); int (*domain_unregister)(struct ll_schedule_domain *domain, struct task *task, uint32_t num_tasks); +#if CONFIG_SOF_USERSPACE_LL + /* + * Initialize the scheduling thread and perform all privileged setup + * (thread creation, timer init, access grants). Called once from + * kernel context before any user-space domain_register() calls. + */ + int (*domain_thread_init)(struct ll_schedule_domain *domain, + struct task *task); + /* Free resources acquired by domain_thread_init(). Called from + * kernel context when the scheduling context is being torn down. 
+ */ + void (*domain_thread_free)(struct ll_schedule_domain *domain, + uint32_t num_tasks); +#endif void (*domain_enable)(struct ll_schedule_domain *domain, int core); void (*domain_disable)(struct ll_schedule_domain *domain, int core); #if CONFIG_CROSS_CORE_STREAM @@ -99,8 +113,12 @@ static inline struct ll_schedule_domain *dma_domain_get(void) #ifdef CONFIG_SOF_USERSPACE_LL struct task *zephyr_ll_task_alloc(void); +void zephyr_ll_task_free(struct task *task); struct k_heap *zephyr_ll_user_heap(void); void zephyr_ll_user_resources_init(void); +void zephyr_ll_grant_access(struct k_thread *thread); +void zephyr_ll_lock_sched(void); +void zephyr_ll_unlock_sched(void); #endif /* CONFIG_SOF_USERSPACE_LL */ static inline struct ll_schedule_domain *domain_init @@ -177,6 +195,31 @@ static inline void domain_task_cancel(struct ll_schedule_domain *domain, domain->ops->domain_task_cancel(domain, task); } +#if CONFIG_SOF_USERSPACE_LL +/* + * Initialize the scheduling thread and do all privileged setup. + * Must be called from kernel context before user-space tasks register. + */ +static inline int domain_thread_init(struct ll_schedule_domain *domain, + struct task *task) +{ + assert(domain->ops->domain_thread_init); + + return domain->ops->domain_thread_init(domain, task); +} + +/* + * Free resources acquired by domain_thread_init(). + * Must be called from kernel context. 
+ */ +static inline void domain_thread_free(struct ll_schedule_domain *domain, + uint32_t num_tasks) +{ + if (domain->ops->domain_thread_free) + domain->ops->domain_thread_free(domain, num_tasks); +} +#endif + static inline int domain_register(struct ll_schedule_domain *domain, struct task *task, void (*handler)(void *arg), void *arg) diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h index bbdcbbecf3b4..9925e8d9b273 100644 --- a/src/include/sof/schedule/schedule.h +++ b/src/include/sof/schedule/schedule.h @@ -158,6 +158,25 @@ struct scheduler_ops { * This operation is optional. */ int (*scheduler_restore)(void *data); + + /** + * Initializes context + * @param data Private data of selected scheduler. + * @param task task that needs to be scheduled + * @return thread that will be used to run the scheduled task + * + * This operation is optional. + */ + struct k_thread *(*scheduler_init_context)(void *data, struct task *task); + + /** + * Frees scheduler context + * @param data Private data of selected scheduler. + * + * This operation is optional. + */ + void (*scheduler_free_context)(void *data); + }; /** \brief Holds information about scheduler. 
*/ @@ -379,6 +398,41 @@ static inline int schedulers_restore(void) return 0; } + +/** See scheduler_ops::scheduler_init_context */ +static inline struct k_thread *scheduler_init_context(struct task *task) +{ + struct schedulers *schedulers = *arch_schedulers_get(); + struct schedule_data *sch; + struct list_item *slist; + + assert(schedulers); + + list_for_item(slist, &schedulers->list) { + sch = container_of(slist, struct schedule_data, list); + if (sch->ops->scheduler_init_context) + return sch->ops->scheduler_init_context(sch->data, task); + } + + return 0; +} + +/** See scheduler_ops::scheduler_free_context */ +static inline void scheduler_free_context(void) +{ + struct schedulers *schedulers = *arch_schedulers_get(); + struct schedule_data *sch; + struct list_item *slist; + + assert(schedulers); + + list_for_item(slist, &schedulers->list) { + sch = container_of(slist, struct schedule_data, list); + if (sch->ops->scheduler_free_context) + sch->ops->scheduler_free_context(sch->data); + } +} + /** * Initializes scheduling task. * @param task Task to be initialized. 
diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index 10f241784625..2c95542d8646 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include #include @@ -35,6 +37,16 @@ #include #include +#ifdef __ZEPHYR__ +#include +#endif + +#ifdef CONFIG_SOF_USERSPACE_LL +#include +#include +#include +#endif + #include LOG_MODULE_REGISTER(ipc, CONFIG_SOF_LOG_LEVEL); @@ -43,6 +55,18 @@ SOF_DEFINE_REG_UUID(ipc); DECLARE_TR_CTX(ipc_tr, SOF_UUID(ipc_uuid), LOG_LEVEL_INFO); +#ifdef CONFIG_SOF_USERSPACE_LL +K_APPMEM_PARTITION_DEFINE(ipc_context_part); + +K_APP_BMEM(ipc_context_part) static struct ipc ipc_context; + +struct ipc *ipc_get(void) +{ + return &ipc_context; +} +EXPORT_SYMBOL(ipc_get); +#endif + int ipc_process_on_core(uint32_t core, bool blocking) { struct ipc *ipc = ipc_get(); @@ -256,7 +280,11 @@ void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority) list_item_append(&msg->list, &ipc->msg_list); } +#if 0 /*def CONFIG_SOF_USERSPACE_LL */ + LOG_WRN("Skipping IPC worker schedule. TODO to fix\n"); +#else schedule_ipc_worker(); +#endif k_spin_unlock(&ipc->lock, key); } @@ -288,34 +316,207 @@ void ipc_schedule_process(struct ipc *ipc) #endif } +#ifdef CONFIG_SOF_USERSPACE_LL +/* User-space thread for pipeline_two_components test */ +#define IPC_USER_STACKSIZE 8192 + +#define IPC_USER_EVENT_CMD BIT(0) +#define IPC_USER_EVENT_STOP BIT(1) + +static struct k_thread ipc_user_thread; +static K_THREAD_STACK_DEFINE(ipc_user_stack, IPC_USER_STACKSIZE); + +/** + * @brief Forward an IPC4 command to the user-space thread. + * + * Called from kernel context (IPC EDF task) to forward the IPC4 + * message to the user-space thread for processing. Sets + * IPC_TASK_IN_THREAD in task_mask so the host is not signaled + * until the user thread completes. Blocks until the user thread + * finishes processing and returns the result. 
+ * + * @param ipc4 Pointer to the IPC4 message request + * @return Result from user thread processing + */ +int ipc_user_forward_cmd(struct ipc4_message_request *ipc4) +{ + struct ipc *ipc = ipc_get(); + struct ipc_user *pdata = ipc->ipc_user_pdata; + k_spinlock_key_t key; + int ret; + + LOG_DBG("IPC: forward cmd %08x", ipc4->primary.dat); + + /* Copy message words — original buffer may be reused */ + pdata->ipc_msg_pri = ipc4->primary.dat; + pdata->ipc_msg_ext = ipc4->extension.dat; + pdata->ipc = ipc; + + /* Prevent host completion until user thread finishes */ + key = k_spin_lock(&ipc->lock); + ipc->task_mask |= IPC_TASK_IN_THREAD; + k_spin_unlock(&ipc->lock, key); + + /* Wake the user thread */ + k_event_set(pdata->event, IPC_USER_EVENT_CMD); + + /* Wait for user thread to complete */ + ret = k_sem_take(pdata->sem, K_MSEC(10)); + if (ret) { + LOG_ERR("IPC user: sem error %d\n", ret); + return ret; + } + + /* Clear the task mask bit and check for completion */ + key = k_spin_lock(&ipc->lock); + ipc->task_mask &= ~IPC_TASK_IN_THREAD; + ipc_complete_cmd(ipc); + k_spin_unlock(&ipc->lock, key); + + return pdata->result; +} + +/** + * User-space thread entry point for pipeline_two_components test. + * p1 points to the ppl_test_ctx shared with the kernel launcher. 
+ */ +static void ipc_user_thread_fn(void *p1, void *p2, void *p3) +{ + struct ipc_user *ipc_user = p1; + + ARG_UNUSED(p2); + ARG_UNUSED(p3); + + __ASSERT(k_is_user_context(), "expected user context"); + + /* Signal startup complete — unblocks init waiting on semaphore */ + k_sem_give(ipc_user->sem); + LOG_INF("IPC user-space thread started"); + + for (;;) { + uint32_t mask = k_event_wait_safe(ipc_user->event, + IPC_USER_EVENT_CMD | IPC_USER_EVENT_STOP, + false, K_MSEC(5000)); + + LOG_DBG("IPC user wake, mask %u", mask); + + if (mask & IPC_USER_EVENT_CMD) { + struct ipc4_pipeline_create pipe_msg; + + /* Reconstruct the IPC4 message from copied words */ + pipe_msg.primary.dat = ipc_user->ipc_msg_pri; + pipe_msg.extension.dat = ipc_user->ipc_msg_ext; + + /* Execute pipeline creation in user context */ + ipc_user->result = ipc_pipeline_new(ipc_user->ipc, (ipc_pipe_new *)&pipe_msg); + + /* Signal completion — kernel side will finish IPC */ + k_sem_give(ipc_user->sem); + } + + if (mask & IPC_USER_EVENT_STOP) + break; + } +} + +__cold int ipc_user_init(void) +{ + struct ipc *ipc = ipc_get(); + struct ipc_user *ipc_user = sof_heap_alloc(sof_sys_user_heap_get(), SOF_MEM_FLAG_USER, + sizeof(*ipc_user), 0); + int ret; + + ipc_user->sem = k_object_alloc(K_OBJ_SEM); + if (!ipc_user->sem) { + LOG_ERR("user IPC sem alloc failed"); + k_panic(); + } + + ret = k_mem_domain_add_partition(zephyr_ll_mem_domain(), &ipc_context_part); + + k_sem_init(ipc_user->sem, 0, 1); + + /* Allocate kernel objects for the user-space thread */ + ipc_user->event = k_object_alloc(K_OBJ_EVENT); + if (!ipc_user->event) { + LOG_ERR("user IPC event alloc failed"); + k_panic(); + } + k_event_init(ipc_user->event); + + k_thread_create(&ipc_user_thread, ipc_user_stack, IPC_USER_STACKSIZE, + ipc_user_thread_fn, ipc_user, NULL, NULL, + -1, K_USER, K_FOREVER); + + k_thread_access_grant(&ipc_user_thread, ipc_user->sem, ipc_user->event); + user_grant_dai_access_all(&ipc_user_thread); + 
user_grant_dma_access_all(&ipc_user_thread); + user_access_to_mailbox(zephyr_ll_mem_domain(), &ipc_user_thread); + zephyr_ll_grant_access(&ipc_user_thread); + k_mem_domain_add_thread(zephyr_ll_mem_domain(), &ipc_user_thread); + + k_thread_name_set(&ipc_user_thread, __func__); + + /* Store references in ipc struct so kernel handler can forward commands */ + ipc->ipc_user_pdata = ipc_user; + + k_thread_start(&ipc_user_thread); + + struct task *task = zephyr_ll_task_alloc(); + schedule_task_init_ll(task, SOF_UUID(ipc_uuid), SOF_SCHEDULE_LL_TIMER, + 0, NULL, NULL, cpu_get_id(), 0); + ipc_user->audio_thread = scheduler_init_context(task); + + /* Wait for user thread startup — consumes the initial k_sem_give from thread */ + k_sem_take(ipc->ipc_user_pdata->sem, K_FOREVER); + + return 0; +} +#else +static int ipc_user_init(void) +{ + return 0; +} +#endif /* CONFIG_SOF_USERSPACE_LL */ + __cold int ipc_init(struct sof *sof) { + struct k_heap *heap; + struct ipc *ipc; + assert_can_be_cold(); tr_dbg(&ipc_tr, "entry"); -#if CONFIG_SOF_BOOT_TEST_STANDALONE - LOG_INF("SOF_BOOT_TEST_STANDALONE, disabling IPC."); - return 0; -#endif +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); + + ipc = ipc_get(); + memset(ipc, 0, sizeof(*ipc)); +#else + heap = NULL; /* init ipc data */ - sof->ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*sof->ipc)); - if (!sof->ipc) { + ipc = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(*ipc)); + if (!ipc) { tr_err(&ipc_tr, "Unable to allocate IPC data"); return -ENOMEM; } - sof->ipc->comp_data = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - SOF_IPC_MSG_MAX_SIZE); - if (!sof->ipc->comp_data) { + sof->ipc = ipc; +#endif + + ipc->comp_data = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + SOF_IPC_MSG_MAX_SIZE, 0); + if (!ipc->comp_data) { tr_err(&ipc_tr, "Unable to allocate IPC component data"); - rfree(sof->ipc); + sof_heap_free(heap, ipc); return -ENOMEM; } + memset(ipc->comp_data, 0, 
SOF_IPC_MSG_MAX_SIZE); - k_spinlock_init(&sof->ipc->lock); - list_init(&sof->ipc->msg_list); - list_init(&sof->ipc->comp_list); + k_spinlock_init(&ipc->lock); + list_init(&ipc->msg_list); + list_init(&ipc->comp_list); #ifdef CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS struct io_perf_data_item init_data = {IO_PERF_IPC_ID, @@ -324,15 +525,17 @@ __cold int ipc_init(struct sof *sof) IO_PERF_POWERED_UP_ENABLED, IO_PERF_D0IX_POWER_MODE, 0, 0, 0 }; - io_perf_monitor_init_data(&sof->ipc->io_perf_in_msg_count, &init_data); + io_perf_monitor_init_data(&ipc->io_perf_in_msg_count, &init_data); init_data.direction = IO_PERF_OUTPUT_DIRECTION; - io_perf_monitor_init_data(&sof->ipc->io_perf_out_msg_count, &init_data); + io_perf_monitor_init_data(&ipc->io_perf_out_msg_count, &init_data); #endif + #ifdef __ZEPHYR__ - struct k_thread *thread = &sof->ipc->ipc_send_wq.thread; + struct k_thread *thread = &ipc->ipc_send_wq.thread; - k_work_queue_start(&sof->ipc->ipc_send_wq, ipc_send_wq_stack, + k_work_queue_init(&ipc->ipc_send_wq); + k_work_queue_start(&ipc->ipc_send_wq, ipc_send_wq_stack, K_THREAD_STACK_SIZEOF(ipc_send_wq_stack), 1, NULL); k_thread_suspend(thread); @@ -342,10 +545,17 @@ __cold int ipc_init(struct sof *sof) k_thread_resume(thread); - k_work_init_delayable(&sof->ipc->z_delayed_work, ipc_work_handler); + k_work_init_delayable(&ipc->z_delayed_work, ipc_work_handler); +#endif + + ipc_user_init(); + +#if CONFIG_SOF_BOOT_TEST_STANDALONE + LOG_INF("SOF_BOOT_TEST_STANDALONE, skipping platform IPC init."); + return 0; #endif - return platform_ipc_init(sof->ipc); + return platform_ipc_init(ipc); } /* Locking: call with ipc->lock held and interrupts disabled */ diff --git a/src/ipc/ipc-helper.c b/src/ipc/ipc-helper.c index 2f685b551747..f1fe867c98cb 100644 --- a/src/ipc/ipc-helper.c +++ b/src/ipc/ipc-helper.c @@ -354,7 +354,7 @@ __cold int ipc_comp_free(struct ipc *ipc, uint32_t comp_id) icd->cd = NULL; list_item_del(&icd->list); - rfree(icd); + 
sof_heap_free(sof_sys_user_heap_get(), icd); return 0; } diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index 80ddb2225e43..5bd5293c61b7 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -878,7 +878,7 @@ static int ipc_dma_trace_config(uint32_t header) error: #if CONFIG_HOST_PTABLE - dma_sg_free(&elem_array); + dma_sg_free(NULL, &elem_array); processing_error: #endif diff --git a/src/ipc/ipc3/helper.c b/src/ipc/ipc3/helper.c index 4d87f042dd1d..fd2af4603b29 100644 --- a/src/ipc/ipc3/helper.c +++ b/src/ipc/ipc3/helper.c @@ -710,7 +710,7 @@ int ipc_comp_new(struct ipc *ipc, ipc_comp *_comp) return 0; } -void ipc_msg_reply(struct sof_ipc_reply *reply) +void z_impl_ipc_msg_reply(struct sof_ipc_reply *reply) { struct ipc *ipc = ipc_get(); k_spinlock_key_t key; diff --git a/src/ipc/ipc3/host-page-table.c b/src/ipc/ipc3/host-page-table.c index 7a3da31caa82..997ee6683352 100644 --- a/src/ipc/ipc3/host-page-table.c +++ b/src/ipc/ipc3/host-page-table.c @@ -239,6 +239,6 @@ int ipc_process_host_buffer(struct ipc *ipc, return 0; error: - dma_sg_free(elem_array); + dma_sg_free(NULL, elem_array); return err; } diff --git a/src/ipc/ipc4/dai.c b/src/ipc/ipc4/dai.c index 9f63fd4196f7..edfd1fdacf2b 100644 --- a/src/ipc/ipc4/dai.c +++ b/src/ipc/ipc4/dai.c @@ -202,7 +202,7 @@ void dai_dma_release(struct dai_data *dd, struct comp_dev *dev) } /* put the allocated DMA channel first */ - if (dd->chan) { + if (dd->chan_index != -1) { struct ipc4_llp_reading_slot slot; if (dd->slot_info.node_id) { @@ -224,15 +224,16 @@ void dai_dma_release(struct dai_data *dd, struct comp_dev *dev) */ #if CONFIG_ZEPHYR_NATIVE_DRIVERS /* if reset is after pause dma has already been stopped */ - dma_stop(dd->chan->dma->z_dev, dd->chan->index); + dma_stop(dd->dma->z_dev, dd->chan_index); - dma_release_channel(dd->chan->dma->z_dev, dd->chan->index); + dma_release_channel(dd->dma->z_dev, dd->chan_index); #else + /* TODO: to remove this, no longer works! 
*/ dma_stop_legacy(dd->chan); dma_channel_put_legacy(dd->chan); -#endif - dd->chan->dev_data = NULL; dd->chan = NULL; +#endif + } } @@ -351,9 +352,9 @@ __cold int dai_config(struct dai_data *dd, struct comp_dev *dev, return 0; } - if (dd->chan) { + if (dd->chan_index != -1) { comp_info(dev, "Configured. dma channel index %d, ignore...", - dd->chan->index); + dd->chan_index); return 0; } @@ -374,15 +375,17 @@ __cold int dai_config(struct dai_data *dd, struct comp_dev *dev, /* allocated dai_config if not yet */ if (!dd->dai_spec_config) { size = sizeof(*copier_cfg); - dd->dai_spec_config = rzalloc(SOF_MEM_FLAG_USER, size); + dd->dai_spec_config = sof_heap_alloc(dd->heap, SOF_MEM_FLAG_USER, size, 0); if (!dd->dai_spec_config) { comp_err(dev, "No memory for size %d", size); return -ENOMEM; } + memset(dd->dai_spec_config, 0, size); + ret = memcpy_s(dd->dai_spec_config, size, copier_cfg, size); if (ret < 0) { - rfree(dd->dai_spec_config); + sof_heap_free(dd->heap, dd->dai_spec_config); dd->dai_spec_config = NULL; return -EINVAL; } @@ -412,7 +415,7 @@ int dai_common_position(struct dai_data *dd, struct comp_dev *dev, platform_dai_wallclock(dev, &dd->wallclock); posn->wallclock = dd->wallclock; - ret = dma_get_status(dd->dma->z_dev, dd->chan->index, &status); + ret = dma_get_status(dd->dma->z_dev, dd->chan_index, &status); if (ret < 0) return ret; @@ -437,7 +440,7 @@ void dai_dma_position_update(struct dai_data *dd, struct comp_dev *dev) if (!dd->slot_info.node_id) return; - ret = dma_get_status(dd->dma->z_dev, dd->chan->index, &status); + ret = sof_dma_get_status(dd->dma, dd->chan_index, &status); if (ret < 0) return; diff --git a/src/ipc/ipc4/handler.c b/src/ipc/ipc4/handler.c index fc64c904ef80..fd9d8a768b9c 100644 --- a/src/ipc/ipc4/handler.c +++ b/src/ipc/ipc4/handler.c @@ -38,6 +38,11 @@ #include #include +#ifdef __ZEPHYR__ +#include +#include +#endif + #include #include #include @@ -127,6 +132,7 @@ static inline const struct ipc4_pipeline_set_state_data 
*ipc4_get_pipeline_data( /* * Global IPC Operations. */ +#ifndef CONFIG_SOF_USERSPACE_LL __cold static int ipc4_new_pipeline(struct ipc4_message_request *ipc4) { struct ipc *ipc = ipc_get(); @@ -135,6 +141,7 @@ __cold static int ipc4_new_pipeline(struct ipc4_message_request *ipc4) return ipc_pipeline_new(ipc, (ipc_pipe_new *)ipc4); } +#endif __cold static int ipc4_delete_pipeline(struct ipc4_message_request *ipc4) { @@ -805,7 +812,12 @@ static int ipc4_process_glb_message(struct ipc4_message_request *ipc4) /* pipeline settings */ case SOF_IPC4_GLB_CREATE_PIPELINE: + /* Implementation in progress: forward only CREATE_PIPELINE for now */ +#ifdef CONFIG_SOF_USERSPACE_LL + ret = ipc_user_forward_cmd(ipc4); +#else ret = ipc4_new_pipeline(ipc4); +#endif break; case SOF_IPC4_GLB_DELETE_PIPELINE: ret = ipc4_delete_pipeline(ipc4); @@ -1672,7 +1684,7 @@ void ipc_send_buffer_status_notify(void) } #endif -void ipc_msg_reply(struct sof_ipc_reply *reply) +void z_impl_ipc_msg_reply(struct sof_ipc_reply *reply) { struct ipc4_message_request in; @@ -1680,6 +1692,15 @@ void ipc_msg_reply(struct sof_ipc_reply *reply) ipc_compound_msg_done(in.primary.r.type, reply->error); } +#ifdef CONFIG_USERSPACE +void z_vrfy_ipc_msg_reply(struct sof_ipc_reply *reply) +{ + K_OOPS(K_SYSCALL_MEMORY_READ(reply, sizeof(*reply))); + z_impl_ipc_msg_reply(reply); +} +#include +#endif + void ipc_cmd(struct ipc_cmd_hdr *_hdr) { struct ipc4_message_request *in = ipc4_get_message_request(); diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c index 3404906b9771..6d2f60ac4c66 100644 --- a/src/ipc/ipc4/helper.c +++ b/src/ipc/ipc4/helper.c @@ -340,9 +340,12 @@ __cold static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc, struct ipc_comp_dev *ipc_pipe; struct pipeline *pipe; struct ipc *ipc = ipc_get(); + struct k_heap *heap = sof_sys_user_heap_get(); assert_can_be_cold(); + LOG_INF("pipe_desc %x, instance %u", pipe_desc, pipe_desc->primary.r.instance_id); + /* check whether pipeline id is 
already taken or in use */ ipc_pipe = ipc_get_pipeline_by_id(ipc, pipe_desc->primary.r.instance_id); if (ipc_pipe) { @@ -352,8 +355,9 @@ __cold static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc, } /* create the pipeline */ - pipe = pipeline_new(NULL, pipe_desc->primary.r.instance_id, + pipe = pipeline_new(heap, pipe_desc->primary.r.instance_id, pipe_desc->primary.r.ppl_priority, 0, pparams); + LOG_INF("pipeline_new() -> %p", pipe); if (!pipe) { tr_err(&ipc_tr, "ipc: pipeline_new() failed"); return IPC4_OUT_OF_MEMORY; @@ -368,12 +372,13 @@ __cold static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc, pipe->core = pipe_desc->extension.r.core_id; /* allocate the IPC pipeline container */ - ipc_pipe = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - sizeof(struct ipc_comp_dev)); + ipc_pipe = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + sizeof(struct ipc_comp_dev), 0); if (!ipc_pipe) { pipeline_free(pipe); return IPC4_OUT_OF_MEMORY; } + memset(ipc_pipe, 0, sizeof(*ipc_pipe)); ipc_pipe->pipeline = pipe; ipc_pipe->type = COMP_TYPE_PIPELINE; @@ -384,6 +389,8 @@ __cold static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc, /* add new pipeline to the list */ list_item_append(&ipc_pipe->list, &ipc->comp_list); + LOG_INF("success"); + return IPC4_SUCCESS; } @@ -457,22 +464,51 @@ __cold static int ipc_pipeline_module_free(uint32_t pipeline_id) /* free sink buffer allocated by current component in bind function */ comp_dev_for_each_consumer_safe(icd->cd, buffer, safe) { +#if CONFIG_ZEPHYR_DP_SCHEDULER + struct k_heap *buf_heap = buffer->audio_buffer.heap; + struct comp_dev *orig_sink = comp_buffer_get_sink_component(buffer); + bool buf_is_dp = buf_heap && + (icd->cd->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP || + (orig_sink && orig_sink->ipc_config.proc_domain == + COMP_PROCESSING_DOMAIN_DP)); +#endif + pipeline_disconnect(icd->cd, buffer, PPL_CONN_DIR_COMP_TO_BUFFER); struct comp_dev *sink 
= comp_buffer_get_sink_component(buffer); /* free the buffer only when the sink module has also been disconnected */ - if (!sink) + if (!sink) { buffer_free(buffer); +#if CONFIG_ZEPHYR_DP_SCHEDULER + if (buf_is_dp) + dp_heap_put(buf_heap); +#endif + } } /* free source buffer allocated by current component in bind function */ comp_dev_for_each_producer_safe(icd->cd, buffer, safe) { +#if CONFIG_ZEPHYR_DP_SCHEDULER + struct k_heap *buf_heap = buffer->audio_buffer.heap; + struct comp_dev *orig_source = + comp_buffer_get_source_component(buffer); + bool buf_is_dp = buf_heap && + (icd->cd->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP || + (orig_source && orig_source->ipc_config.proc_domain == + COMP_PROCESSING_DOMAIN_DP)); +#endif + pipeline_disconnect(icd->cd, buffer, PPL_CONN_DIR_BUFFER_TO_COMP); struct comp_dev *source = comp_buffer_get_source_component(buffer); /* free the buffer only when the source module has also been disconnected */ - if (!source) + if (!source) { buffer_free(buffer); +#if CONFIG_ZEPHYR_DP_SCHEDULER + if (buf_is_dp) + dp_heap_put(buf_heap); +#endif + } } if (!cpu_is_me(icd->core)) @@ -521,7 +557,7 @@ __cold int ipc_pipeline_free(struct ipc *ipc, uint32_t comp_id) ipc_pipe->pipeline = NULL; list_item_del(&ipc_pipe->list); - rfree(ipc_pipe); + sof_heap_free(sof_sys_user_heap_get(), ipc_pipe); return IPC4_SUCCESS; } @@ -559,6 +595,23 @@ __cold static struct comp_buffer *ipc4_create_buffer(struct comp_dev *src, bool * disable any interrupts. 
*/ +#if CONFIG_SOF_USERSPACE_LL +#define ll_block(cross_core_bind, flags) \ + do { \ + if (cross_core_bind) \ + domain_block(sof_get()->platform_timer_domain); \ + else \ + zephyr_ll_lock_sched(); \ + } while (0) + +#define ll_unblock(cross_core_bind, flags) \ + do { \ + if (cross_core_bind) \ + domain_unblock(sof_get()->platform_timer_domain); \ + else \ + zephyr_ll_unlock_sched(); \ + } while (0) +#else #define ll_block(cross_core_bind, flags) \ do { \ if (cross_core_bind) \ @@ -574,6 +627,7 @@ __cold static struct comp_buffer *ipc4_create_buffer(struct comp_dev *src, bool else \ irq_local_enable(flags); \ } while (0) +#endif /* CONFIG_SOF_USERSPACE_LL */ /* Calling both ll_block() and ll_wait_finished_on_core() makes sure LL will not start its * next cycle and its current cycle on specified core has finished. @@ -605,8 +659,13 @@ static int ll_wait_finished_on_core(struct comp_dev *dev) #else +#if CONFIG_SOF_USERSPACE_LL +#define ll_block(cross_core_bind, flags) zephyr_ll_lock_sched() +#define ll_unblock(cross_core_bind, flags) zephyr_ll_unlock_sched() +#else #define ll_block(cross_core_bind, flags) irq_local_disable(flags) #define ll_unblock(cross_core_bind, flags) irq_local_enable(flags) +#endif /* CONFIG_SOF_USERSPACE_LL */ #endif @@ -663,6 +722,14 @@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) #else dp_heap = NULL; #endif /* CONFIG_ZEPHYR_DP_SCHEDULER */ + +#ifdef CONFIG_SOF_USERSPACE_LL + if (!dp_heap) { + /* use system user heap for non-DP module buffers */ + dp_heap = sof_sys_user_heap_get(); + } +#endif + bool cross_core_bind = source->ipc_config.core != sink->ipc_config.core; /* If both components are on same core -- process IPC on that core, @@ -738,7 +805,7 @@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) } #if CONFIG_ZEPHYR_DP_SCHEDULER - if (dp_heap) { + if (dp) { struct dp_heap_user *dp_user = container_of(dp_heap, struct dp_heap_user, heap); dp_user->client_count++; @@ -782,6 +849,8 
@@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) buf_get_id(buffer)); if (!ring_buffer) { buffer_free(buffer); + if (dp) + dp_heap_put(dp_heap); return IPC4_OUT_OF_MEMORY; } @@ -869,6 +938,10 @@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) free: ll_unblock(cross_core_bind, flags); buffer_free(buffer); +#if CONFIG_ZEPHYR_DP_SCHEDULER + if (dp) + dp_heap_put(dp_heap); +#endif return IPC4_INVALID_RESOURCE_STATE; } @@ -950,6 +1023,13 @@ __cold int ipc_comp_disconnect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) #endif } +#if CONFIG_ZEPHYR_DP_SCHEDULER + struct k_heap *buf_heap = buffer->audio_buffer.heap; + bool buf_is_dp = buf_heap && + (src->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP || + sink->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP); +#endif + pipeline_disconnect(src, buffer, PPL_CONN_DIR_COMP_TO_BUFFER); pipeline_disconnect(sink, buffer, PPL_CONN_DIR_BUFFER_TO_COMP); /* these might call comp_ipc4_bind_remote() if necessary */ @@ -965,6 +1045,10 @@ __cold int ipc_comp_disconnect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) ll_unblock(cross_core_unbind, flags); buffer_free(buffer); +#if CONFIG_ZEPHYR_DP_SCHEDULER + if (buf_is_dp) + dp_heap_put(buf_heap); +#endif if (ret || ret1) return IPC4_INVALID_RESOURCE_ID; @@ -1034,7 +1118,7 @@ __cold int ipc4_chain_dma_state(struct comp_dev *dev, struct ipc4_chain_dma *cdm if (icd->cd != dev) continue; list_item_del(&icd->list); - rfree(icd); + sof_heap_free(sof_sys_user_heap_get(), icd); break; } comp_free(dev); @@ -1269,13 +1353,14 @@ __cold static int ipc4_add_comp_dev(struct comp_dev *dev) } /* allocate the IPC component container */ - icd = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - sizeof(struct ipc_comp_dev)); + icd = sof_heap_alloc(sof_sys_user_heap_get(), SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + sizeof(struct ipc_comp_dev), 0); if (!icd) { tr_err(&ipc_tr, "alloc failed"); - rfree(icd); + 
sof_heap_free(sof_sys_user_heap_get(), icd); return IPC4_OUT_OF_MEMORY; } + memset(icd, 0, sizeof(*icd)); icd->cd = dev; icd->type = COMP_TYPE_COMPONENT; diff --git a/src/lib/dai.c b/src/lib/dai.c index 8e45f8a4bb97..3f77fe0a4b20 100644 --- a/src/lib/dai.c +++ b/src/lib/dai.c @@ -11,6 +11,7 @@ #include #include #include +#include /* for zephyr_ll_user_heap() */ #include #include #include @@ -298,6 +299,11 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) { const struct device *dev; struct dai *d; + struct k_heap *heap = NULL; + +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif dev = dai_get_device(type, index); if (!dev) { @@ -306,10 +312,12 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) return NULL; } - d = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(struct dai)); + d = sof_heap_alloc(heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, sizeof(struct dai), 0); if (!d) return NULL; + memset(d, 0, sizeof(struct dai)); + d->index = index; d->type = type; d->dev = dev; @@ -319,7 +327,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) if (dai_probe(d->dev)) { tr_err(&dai_tr, "dai_get: failed to probe dai with index %d type %d", index, type); - rfree(d); + sof_heap_free(heap, d); return NULL; } @@ -330,6 +338,11 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) void dai_put(struct dai *dai) { int ret; + struct k_heap *heap = NULL; + +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif ret = dai_remove(dai->dev); if (ret < 0) { @@ -337,7 +350,7 @@ void dai_put(struct dai *dai) dai->index, ret); } - rfree(dai); + sof_heap_free(heap, dai); } #else static inline const struct dai_type_info *dai_find_type(uint32_t type) diff --git a/src/lib/dma.c b/src/lib/dma.c index 608ee091ac25..70ac5c975058 100644 --- a/src/lib/dma.c +++ b/src/lib/dma.c @@ -285,7 +285,8 @@ void dma_put(struct dma *dma) } #endif -int dma_sg_alloc(struct dma_sg_elem_array 
*elem_array, +int dma_sg_alloc(struct k_heap *heap, + struct dma_sg_elem_array *elem_array, uint32_t flags, uint32_t direction, uint32_t buffer_count, uint32_t buffer_bytes, @@ -293,11 +294,13 @@ int dma_sg_alloc(struct dma_sg_elem_array *elem_array, { int i; - elem_array->elems = rzalloc(SOF_MEM_FLAG_USER, - sizeof(struct dma_sg_elem) * buffer_count); + elem_array->elems = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, + sizeof(struct dma_sg_elem) * buffer_count, 0); if (!elem_array->elems) return -ENOMEM; + memset(elem_array->elems, 0, sizeof(struct dma_sg_elem) * buffer_count); + for (i = 0; i < buffer_count; i++) { elem_array->elems[i].size = buffer_bytes; // TODO: may count offsets once @@ -319,9 +322,9 @@ int dma_sg_alloc(struct dma_sg_elem_array *elem_array, return 0; } -void dma_sg_free(struct dma_sg_elem_array *elem_array) +void dma_sg_free(struct k_heap *heap, struct dma_sg_elem_array *elem_array) { - rfree(elem_array->elems); + sof_heap_free(heap, elem_array->elems); dma_sg_init(elem_array); } diff --git a/src/platform/library/lib/alloc.c b/src/platform/library/lib/alloc.c index 74cb926e4aff..804c7a2f4f6e 100644 --- a/src/platform/library/lib/alloc.c +++ b/src/platform/library/lib/alloc.c @@ -75,3 +75,8 @@ struct k_heap *sof_sys_heap_get(void) { return NULL; } + +struct k_heap *sof_sys_user_heap_get(void) +{ + return NULL; +} diff --git a/src/probe/probe.c b/src/probe/probe.c index f15ee84f7daf..19f62bf81a14 100644 --- a/src/probe/probe.c +++ b/src/probe/probe.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -176,7 +175,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) dma->config.dest_width = sizeof(uint32_t); dma->config.cyclic = 0; - err = dma_sg_alloc(&dma->config.elem_array, SOF_MEM_FLAG_USER, + err = dma_sg_alloc(NULL, &dma->config.elem_array, SOF_MEM_FLAG_USER, dma->config.direction, elem_num, elem_size, elem_addr, 0); if (err < 0) return err; @@ -255,7 +254,7 @@ static int 
probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) static int probe_dma_deinit(struct probe_dma_ext *dma) { int err = 0; - dma_sg_free(&dma->config.elem_array); + dma_sg_free(NULL, &dma->config.elem_array); #if CONFIG_ZEPHYR_NATIVE_DRIVERS err = dma_stop(dma->dc.dmac->z_dev, dma->dc.chan->index); #else @@ -902,14 +901,12 @@ static ssize_t probe_logging_hook(uint8_t *buffer, size_t length) * Extraction probe: generate format, header and copy data to probe buffer. * Injection probe: find corresponding DMA, check avail data, copy data, * update pointers and request more data from host if needed. - * \param[in] arg pointer (not used). - * \param[in] type of notify. - * \param[in] data pointer. + * \param[in] arg pointer to buffer_id. + * \param[in] cb_data pointer to buffer callback transaction data. */ -static void probe_cb_produce(void *arg, enum notify_id type, void *data) +static void probe_cb_produce(void *arg, struct buffer_cb_transact *cb_data) { struct probe_pdata *_probe = probe_get(); - struct buffer_cb_transact *cb_data = data; struct comp_buffer *buffer = cb_data->buffer; struct probe_dma_ext *dma; uint32_t buffer_id; @@ -921,7 +918,7 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) uint32_t format; uint64_t checksum; - buffer_id = *(int *)arg; + buffer_id = *(uint32_t *)arg; /* search for probe point connected to this buffer */ for (i = 0; i < CONFIG_PROBE_POINTS_MAX; i++) @@ -1068,13 +1065,11 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) /** * \brief Callback for buffer free, it will remove probe point. - * \param[in] arg pointer (not used). - * \param[in] type of notify. - * \param[in] data pointer. + * \param[in] arg pointer to buffer_id. 
*/ -static void probe_cb_free(void *arg, enum notify_id type, void *data) +static void probe_cb_free(void *arg) { - uint32_t buffer_id = *(int *)arg; + uint32_t buffer_id = *(uint32_t *)arg; int ret; tr_dbg(&pr_tr, "buffer_id = %u", buffer_id); @@ -1315,16 +1310,13 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) probe_point_id_t *new_buf_id = &_probe->probe_points[first_free].buffer_id; #if CONFIG_IPC_MAJOR_4 - notifier_register(&new_buf_id->full_id, buf, NOTIFIER_ID_BUFFER_PRODUCE, - &probe_cb_produce, 0); - notifier_register(&new_buf_id->full_id, buf, NOTIFIER_ID_BUFFER_FREE, - &probe_cb_free, 0); + struct comp_buffer *probe_buf = buf; #else - notifier_register(&new_buf_id->full_id, dev->cb, NOTIFIER_ID_BUFFER_PRODUCE, - &probe_cb_produce, 0); - notifier_register(&new_buf_id->full_id, dev->cb, NOTIFIER_ID_BUFFER_FREE, - &probe_cb_free, 0); + struct comp_buffer *probe_buf = (struct comp_buffer *)dev->cb; #endif + probe_buf->probe_cb_produce = probe_cb_produce; + probe_buf->probe_cb_free = probe_cb_free; + probe_buf->probe_cb_arg = &new_buf_id->full_id; } } @@ -1444,19 +1436,20 @@ int probe_point_remove(uint32_t count, const uint32_t *buffer_id) if (dev) { buf = ipc4_get_buffer(dev, *buf_id); if (buf) { - notifier_unregister(NULL, buf, - NOTIFIER_ID_BUFFER_PRODUCE); - notifier_unregister(NULL, buf, - NOTIFIER_ID_BUFFER_FREE); + buf->probe_cb_produce = NULL; + buf->probe_cb_free = NULL; + buf->probe_cb_arg = NULL; } } #else dev = ipc_get_comp_by_id(ipc_get(), buffer_id[i]); if (dev) { - notifier_unregister(&buf_id->full_id, dev->cb, - NOTIFIER_ID_BUFFER_PRODUCE); - notifier_unregister(&buf_id->full_id, dev->cb, - NOTIFIER_ID_BUFFER_FREE); + struct comp_buffer *probe_buf = + (struct comp_buffer *)dev->cb; + + probe_buf->probe_cb_produce = NULL; + probe_buf->probe_cb_free = NULL; + probe_buf->probe_cb_arg = NULL; } #endif _probe->probe_points[j].stream_tag = diff --git a/src/schedule/schedule.c b/src/schedule/schedule.c index 
5e56b6c490e0..3b80ab4bf552 100644 --- a/src/schedule/schedule.c +++ b/src/schedule/schedule.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -47,15 +48,20 @@ int schedule_task_init(struct task *task, static void scheduler_register(struct schedule_data *scheduler) { struct schedulers **sch = arch_schedulers_get(); + struct k_heap *heap = NULL; +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif if (!*sch) { /* init schedulers list */ - *sch = rzalloc(SOF_MEM_FLAG_KERNEL, - sizeof(**sch)); + *sch = sof_heap_alloc(heap, SOF_MEM_FLAG_KERNEL, + sizeof(**sch), 0); if (!*sch) { tr_err(&sch_tr, "allocation failed"); return; } + memset(*sch, 0, sizeof(**sch)); list_init(&(*sch)->list); } @@ -65,16 +71,21 @@ static void scheduler_register(struct schedule_data *scheduler) void scheduler_init(int type, const struct scheduler_ops *ops, void *data) { struct schedule_data *sch; + struct k_heap *heap = NULL; +#ifdef CONFIG_SOF_USERSPACE_LL + heap = zephyr_ll_user_heap(); +#endif if (!ops || !ops->schedule_task || !ops->schedule_task_cancel || !ops->schedule_task_free) return; - sch = rzalloc(SOF_MEM_FLAG_KERNEL, sizeof(*sch)); + sch = sof_heap_alloc(heap, SOF_MEM_FLAG_KERNEL, sizeof(*sch), 0); if (!sch) { tr_err(&sch_tr, "allocation failed"); sof_panic(SOF_IPC_PANIC_IPC); } + memset(sch, 0, sizeof(*sch)); list_init(&sch->list); sch->type = type; sch->ops = ops; diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index 6a5812353d9e..f77ef40a1933 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -126,7 +126,8 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3) } #endif - dt->handler(dt->arg); + if (dt->handler) + dt->handler(dt->arg); #ifdef CONFIG_SCHEDULE_LL_STATS_LOG cycles1 = k_cycle_get_32(); @@ -287,61 +288,65 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, #else /* CONFIG_SOF_USERSPACE_LL */ -/* User-space implementation for 
register/unregister */ - -static int zephyr_domain_register_user(struct ll_schedule_domain *domain, - struct task *task, - void (*handler)(void *arg), void *arg) +/* + * Privileged thread initialization for userspace LL scheduling. + * Creates the scheduling thread, sets up timer, grants access to kernel + * objects. Must be called from kernel context before any user-space + * domain_register() calls. + */ +static int zephyr_domain_thread_init(struct ll_schedule_domain *domain, + struct task *task) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); - int core = cpu_get_id(); - struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; - char thread_name[] = "ll_thread0"; + struct zephyr_domain_thread *dt; + char thread_name[] = "userll_thread0"; k_tid_t thread; + int core; - tr_dbg(&ll_tr, "entry"); + tr_dbg(&ll_tr, "thread_init entry"); - /* domain work only needs registered once on each core */ - if (dt->handler) - return 0; + if (task->core < 0 || task->core >= CONFIG_CORE_COUNT) + return -EINVAL; - __ASSERT_NO_MSG(task->core == core); + core = task->core; + dt = zephyr_domain->domain_thread + core; - dt->handler = handler; - dt->arg = arg; + /* thread only needs to be created once per core */ + if (dt->ll_thread) + return 0; + + dt->handler = NULL; /* 10 is rather random, we better not accumulate 10 missed timer interrupts */ k_sem_init(dt->sem, 0, 10); thread_name[sizeof(thread_name) - 2] = '0' + core; + /* Allocate thread structure dynamically */ + dt->ll_thread = k_object_alloc(K_OBJ_THREAD); if (!dt->ll_thread) { - /* Allocate thread structure dynamically */ - dt->ll_thread = k_object_alloc(K_OBJ_THREAD); - if (!dt->ll_thread) { - tr_err(&ll_tr, "Failed to allocate thread object for core %d", core); - dt->handler = NULL; - dt->arg = NULL; - return -ENOMEM; - } + tr_err(&ll_tr, "Failed to allocate thread object for core %d", core); + dt->handler = NULL; + dt->arg = NULL; + return -ENOMEM; + } - thread = 
k_thread_create(dt->ll_thread, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE, - zephyr_domain_thread_fn, zephyr_domain, - INT_TO_POINTER(core), NULL, CONFIG_LL_THREAD_PRIORITY, - K_USER, K_FOREVER); + thread = k_thread_create(dt->ll_thread, ll_sched_stack[core], ZEPHYR_LL_STACK_SIZE, + zephyr_domain_thread_fn, zephyr_domain, + INT_TO_POINTER(core), NULL, CONFIG_LL_THREAD_PRIORITY, + K_USER, K_FOREVER); - k_thread_cpu_mask_clear(thread); - k_thread_cpu_mask_enable(thread, core); - k_thread_name_set(thread, thread_name); + k_thread_cpu_mask_clear(thread); + k_thread_cpu_mask_enable(thread, core); + k_thread_name_set(thread, thread_name); - k_mem_domain_add_thread(zephyr_ll_mem_domain(), thread); - k_thread_access_grant(thread, dt->sem, domain->lock, zephyr_domain->timer); - user_grant_dai_access_all(thread); - user_grant_dma_access_all(thread); - tr_dbg(&ll_tr, "granted LL access to thread %p (core %d)", thread, core); + k_mem_domain_add_thread(zephyr_ll_mem_domain(), thread); + k_thread_access_grant(thread, dt->sem, domain->lock, zephyr_domain->timer); + user_grant_dai_access_all(thread); + user_grant_dma_access_all(thread); + tr_dbg(&ll_tr, "granted LL access to thread %p (core %d)", thread, core); - k_thread_start(thread); - } + k_thread_start(thread); k_mutex_lock(domain->lock, K_FOREVER); if (!k_timer_user_data_get(zephyr_domain->timer)) { @@ -364,6 +369,43 @@ static int zephyr_domain_register_user(struct ll_schedule_domain *domain, return 0; } +/* + * User-space register: bookkeeping only. The privileged thread setup has + * already been done by domain_thread_init() called from kernel context. 
+ */ +static int zephyr_domain_register_user(struct ll_schedule_domain *domain, + struct task *task, + void (*handler)(void *arg), void *arg) +{ + struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); + struct zephyr_domain_thread *dt; + int core; + + tr_dbg(&ll_tr, "register_user entry"); + + if (task->core < 0 || task->core >= CONFIG_CORE_COUNT) + return -EINVAL; + + core = task->core; + dt = zephyr_domain->domain_thread + core; + + if (!dt->ll_thread) { + tr_err(&ll_tr, "domain_thread_init() not called for core %d", core); + return -EINVAL; + } + + __ASSERT_NO_MSG(!dt->handler || dt->handler == handler); + if (dt->handler) + return 0; + + dt->handler = handler; + dt->arg = arg; + + tr_info(&ll_tr, "task registered on core %d", core); + + return 0; +} + static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, struct task *task, uint32_t num_tasks) { @@ -378,14 +420,6 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, k_mutex_lock(domain->lock, K_FOREVER); - if (!atomic_read(&domain->total_num_tasks)) { - /* Disable the watchdog */ - watchdog_disable(core); - - k_timer_stop(zephyr_domain->timer); - k_timer_user_data_set(zephyr_domain->timer, NULL); - } - zephyr_domain->domain_thread[core].handler = NULL; k_mutex_unlock(domain->lock); @@ -404,12 +438,51 @@ static int zephyr_domain_unregister_user(struct ll_schedule_domain *domain, return 0; } -struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain) +/* + * Free resources acquired by zephyr_domain_thread_init(). + * Stops the timer, aborts the scheduling thread and frees the thread object. + * Must be called from kernel context. 
+ */ +static void zephyr_domain_thread_free(struct ll_schedule_domain *domain, + uint32_t num_tasks) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); int core = cpu_get_id(); struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; + tr_dbg(&ll_tr, "thread_free entry, core %d, num_tasks %u", core, num_tasks); + + /* Still tasks on other cores, only clean up this core's thread */ + k_mutex_lock(domain->lock, K_FOREVER); + + if (!num_tasks && !atomic_read(&domain->total_num_tasks)) { + /* Last task globally: stop the timer and watchdog */ + watchdog_disable(core); + + k_timer_stop(zephyr_domain->timer); + k_timer_user_data_set(zephyr_domain->timer, NULL); + } + + dt->handler = NULL; + dt->arg = NULL; + + k_mutex_unlock(domain->lock); + + if (dt->ll_thread) { + k_thread_abort(dt->ll_thread); + k_object_free(dt->ll_thread); + dt->ll_thread = NULL; + } + + tr_info(&ll_tr, "thread_free done, core %d", core); +} + +struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain) +{ + struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); + int core = 0; /* cpu_get_id(); */ + struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; + tr_dbg(&ll_tr, "entry"); return dt->ll_thread; @@ -446,6 +519,8 @@ APP_TASK_DATA static const struct ll_schedule_domain_ops zephyr_domain_ops = { #ifdef CONFIG_SOF_USERSPACE_LL .domain_register = zephyr_domain_register_user, .domain_unregister = zephyr_domain_unregister_user, + .domain_thread_init = zephyr_domain_thread_init, + .domain_thread_free = zephyr_domain_thread_free, #else .domain_register = zephyr_domain_register, .domain_unregister = zephyr_domain_unregister, diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 575a82d91dda..8e987d0dcf6b 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -40,7 +40,10 @@ struct zephyr_ll { struct zephyr_ll_pdata { bool run; bool freeing; - struct k_sem sem; +#ifndef 
CONFIG_SOF_USERSPACE_LL + struct k_sem sem_obj; +#endif + struct k_sem *sem; }; static void zephyr_ll_lock(struct zephyr_ll *sch, uint32_t *flags) @@ -87,7 +90,7 @@ static void zephyr_ll_task_done(struct zephyr_ll *sch, * zephyr_ll_task_free() is trying to free this task. Complete * it and signal the semaphore to let the function proceed */ - k_sem_give(&pdata->sem); + k_sem_give(pdata->sem); tr_info(&ll_tr, "task complete %p %pU", task, task->uid); tr_info(&ll_tr, "num_tasks %d total_num_tasks %ld", @@ -360,17 +363,7 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll *sch, struct task *ta ret = domain_register(sch->ll_domain, task, &schedule_ll_callback, sch); if (ret < 0) - tr_err(&ll_tr, "cannot register domain %d", - ret); - -#if CONFIG_SOF_USERSPACE_LL - k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain), sch->lock); - - tr_dbg(&ll_tr, "granting access to lock %p for thread %p", sch->lock, - zephyr_domain_thread_tid(sch->ll_domain)); - tr_dbg(&ll_tr, "granting access to domain lock %p for thread %p", &sch->ll_domain->lock, - zephyr_domain_thread_tid(sch->ll_domain)); -#endif + tr_err(&ll_tr, "cannot register domain %d", ret); return 0; } @@ -403,7 +396,7 @@ static int zephyr_ll_task_schedule_after(void *data, struct task *task, uint64_t * This is synchronous - after this returns the object can be destroyed! * Assertion: under Zephyr this is always called from a thread context! 
*/ -static int zephyr_ll_task_free(void *data, struct task *task) +static int zephyr_ll_task_sched_free(void *data, struct task *task) { struct zephyr_ll *sch = data; uint32_t flags; @@ -454,7 +447,11 @@ static int zephyr_ll_task_free(void *data, struct task *task) if (must_wait) /* Wait for up to 100 periods */ - k_sem_take(&pdata->sem, K_USEC(LL_TIMER_PERIOD_US * 100)); + k_sem_take(pdata->sem, K_USEC(LL_TIMER_PERIOD_US * 100)); + +#ifdef CONFIG_SOF_USERSPACE_LL + k_object_free(pdata->sem); +#endif /* Protect against racing with schedule_task() */ zephyr_ll_lock(sch, &flags); @@ -511,13 +508,55 @@ static void zephyr_ll_scheduler_free(void *data, uint32_t flags) sch->n_tasks); } +#if CONFIG_SOF_USERSPACE_LL +struct k_thread *zephyr_ll_init_context(void *data, struct task *task) +{ + struct zephyr_ll *sch = data; + int ret; + + /* + * Use domain_thread_init() for privileged setup (thread creation, + * timer, access grants). domain_register() is now bookkeeping only + * and will be called later from user context when scheduling tasks. 
+ */ + ret = domain_thread_init(sch->ll_domain, task); + if (ret < 0) { + tr_err(&ll_tr, "cannot init_context %d", ret); + return NULL; + } + + if (!k_is_user_context()) { + k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain), sch->lock); + + tr_dbg(&ll_tr, "granting access to lock %p for thread %p", sch->lock, + zephyr_domain_thread_tid(sch->ll_domain)); + tr_dbg(&ll_tr, "granting access to domain lock %p for thread %p", &sch->ll_domain->lock, + zephyr_domain_thread_tid(sch->ll_domain)); + } + + return zephyr_domain_thread_tid(sch->ll_domain); +} + +void zephyr_ll_free_context(void *data) +{ + struct zephyr_ll *sch = data; + + tr_info(&ll_tr, "free the domain thread"); + domain_thread_free(sch->ll_domain, sch->n_tasks); +} +#endif + static const struct scheduler_ops zephyr_ll_ops = { .schedule_task = zephyr_ll_task_schedule, .schedule_task_before = zephyr_ll_task_schedule_before, .schedule_task_after = zephyr_ll_task_schedule_after, - .schedule_task_free = zephyr_ll_task_free, + .schedule_task_free = zephyr_ll_task_sched_free, .schedule_task_cancel = zephyr_ll_task_cancel, .scheduler_free = zephyr_ll_scheduler_free, +#if CONFIG_SOF_USERSPACE_LL + .scheduler_init_context = zephyr_ll_init_context, + .scheduler_free_context = zephyr_ll_free_context, +#endif }; #if CONFIG_SOF_USERSPACE_LL @@ -526,6 +565,43 @@ struct task *zephyr_ll_task_alloc(void) return sof_heap_alloc(zephyr_ll_user_heap(), SOF_MEM_FLAG_USER, sizeof(struct task), sizeof(void *)); } + +void zephyr_ll_task_free(struct task *task) +{ + sof_heap_free(zephyr_ll_user_heap(), task); +} + +void zephyr_ll_grant_access(struct k_thread *thread) +{ + struct zephyr_ll *ll_sch = (struct zephyr_ll *)scheduler_get_data(SOF_SCHEDULE_LL_TIMER); + + k_thread_access_grant(thread, ll_sch->lock); +} + +/** + * Lock the LL scheduler to prevent it from processing tasks. + * + * Uses the LL scheduler's own k_mutex which is re-entrant, so + * schedule_task() calls within the locked section will not deadlock. 
+ * Must be paired with zephyr_ll_unlock_sched(). + */ +void zephyr_ll_lock_sched(void) +{ + struct zephyr_ll *sch = (struct zephyr_ll *)scheduler_get_data(SOF_SCHEDULE_LL_TIMER); + + k_mutex_lock(sch->lock, K_FOREVER); +} + +/** + * Unlock the LL scheduler after a previous zephyr_ll_lock_sched() call. + */ +void zephyr_ll_unlock_sched(void) +{ + struct zephyr_ll *sch = (struct zephyr_ll *)scheduler_get_data(SOF_SCHEDULE_LL_TIMER); + + k_mutex_unlock(sch->lock); +} + #endif /* CONFIG_SOF_USERSPACE_LL */ int zephyr_ll_task_init(struct task *task, @@ -560,7 +636,12 @@ int zephyr_ll_task_init(struct task *task, memset(pdata, 0, sizeof(*pdata)); - k_sem_init(&pdata->sem, 0, 1); +#ifdef CONFIG_SOF_USERSPACE_LL + pdata->sem = k_object_alloc(K_OBJ_SEM); +#else + pdata->sem = &pdata->sem_obj; +#endif + k_sem_init(pdata->sem, 0, 1); task->priv_data = pdata; diff --git a/src/schedule/zephyr_ll_user.c b/src/schedule/zephyr_ll_user.c index aa33807b4aa3..a0b833ec3a64 100644 --- a/src/schedule/zephyr_ll_user.c +++ b/src/schedule/zephyr_ll_user.c @@ -17,15 +17,17 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL); * * This structure encapsulates the memory management resources required for the * low-latency (LL) scheduler in userspace mode. It provides memory isolation - * and heap management for LL scheduler threads. + * and heap management for LL scheduler threads. Only kernel accessible. 
*/ struct zephyr_ll_mem_resources { struct k_mem_domain mem_domain; /**< Memory domain for LL thread isolation */ - struct k_heap *heap; /**< Heap allocator for LL scheduler memory */ }; static struct zephyr_ll_mem_resources ll_mem_resources; +/* Heap allocator for LL scheduler memory (user accessible pointer) */ +APP_TASK_DATA static struct k_heap *zephyr_ll_heap; + static struct k_heap *zephyr_ll_heap_init(void) { struct k_heap *heap = module_driver_heap_init(); @@ -53,6 +55,7 @@ static struct k_heap *zephyr_ll_heap_init(void) if (ret) k_panic(); +#ifdef CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS mem_partition.start = (uintptr_t)sys_cache_uncached_ptr_get(heap->heap.init_mem); mem_partition.attr = K_MEM_PARTITION_P_RW_U_RW; ret = k_mem_domain_add_partition(&ll_mem_resources.mem_domain, &mem_partition); @@ -60,6 +63,7 @@ static struct k_heap *zephyr_ll_heap_init(void) (void *)mem_partition.start, heap->heap.init_bytes, ret); if (ret) k_panic(); +#endif return heap; } @@ -68,15 +72,16 @@ void zephyr_ll_user_resources_init(void) { k_mem_domain_init(&ll_mem_resources.mem_domain, 0, NULL); - ll_mem_resources.heap = zephyr_ll_heap_init(); + zephyr_ll_heap = zephyr_ll_heap_init(); /* attach common partition to LL domain */ user_memory_attach_common_partition(zephyr_ll_mem_domain()); + user_memory_attach_system_user_partition(zephyr_ll_mem_domain()); } struct k_heap *zephyr_ll_user_heap(void) { - return ll_mem_resources.heap; + return zephyr_ll_heap; } struct k_mem_domain *zephyr_ll_mem_domain(void) diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 0523a362e397..554204ac5c70 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -407,7 +407,7 @@ void dma_trace_disable(struct dma_trace_data *d) #if (CONFIG_HOST_PTABLE) /* Free up the host SG if it is set */ if (d->host_size) { - dma_sg_free(&d->config.elem_array); + dma_sg_free(NULL, &d->config.elem_array); d->host_size = 0; } #endif diff --git a/test/cmocka/src/common_mocks.c 
b/test/cmocka/src/common_mocks.c index 60b6215c4cd5..16fee8f2ff48 100644 --- a/test/cmocka/src/common_mocks.c +++ b/test/cmocka/src/common_mocks.c @@ -59,16 +59,6 @@ void WEAK *rzalloc(uint32_t flags, return calloc(bytes, 1); } -void WEAK *rbrealloc_align(void *ptr, uint32_t flags, - size_t bytes, size_t old_bytes, uint32_t alignment) -{ - (void)flags; - (void)old_bytes; - (void)alignment; - - return realloc(ptr, bytes); -} - void WEAK *rmalloc_align(uint32_t flags, size_t bytes, uint32_t alignment) { (void)flags; diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt index 233c93f3e070..38277784d10b 100644 --- a/zephyr/CMakeLists.txt +++ b/zephyr/CMakeLists.txt @@ -545,6 +545,9 @@ zephyr_library_sources_ifdef(CONFIG_SHELL zephyr_syscall_header(${SOF_SRC_PATH}/include/sof/audio/module_adapter/module/generic.h) zephyr_syscall_header(${SOF_SRC_PATH}/include/sof/lib/fast-get.h) +zephyr_syscall_header(${SOF_SRC_PATH}/include/sof/ipc/ipc_reply.h) +zephyr_syscall_header(include/rtos/alloc.h) +zephyr_library_sources(syscall/alloc.c) zephyr_library_link_libraries(SOF) target_link_libraries(SOF INTERFACE zephyr_interface) diff --git a/zephyr/Kconfig b/zephyr/Kconfig index c744b965a07f..accebae6fa73 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -29,6 +29,13 @@ config SOF_USERSPACE_INTERFACE_DMA help Allow user-space threads to use the SOF DMA interface. +config SOF_USERSPACE_INTERFACE_ALLOC + bool "Enable SOF heap alloc interface to userspace threads" + depends on USERSPACE + help + Allow user-space threads to use sof_heap_alloc/sof_heap_free + as Zephyr system calls. 
+ config SOF_USERSPACE_LL bool "Run Low-Latency pipelines in userspace threads" depends on USERSPACE diff --git a/zephyr/include/rtos/alloc.h b/zephyr/include/rtos/alloc.h index 116789ea6ead..f51ef77554be 100644 --- a/zephyr/include/rtos/alloc.h +++ b/zephyr/include/rtos/alloc.h @@ -88,29 +88,6 @@ static inline void *rballoc(uint32_t flags, size_t bytes) return rballoc_align(flags, bytes, PLATFORM_DCACHE_ALIGN); } -/** - * Changes size of the memory block allocated. - * @param ptr Address of the block to resize. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param bytes New size in bytes. - * @param old_bytes Old size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the resized memory of NULL if failed. - */ -void *rbrealloc_align(void *ptr, uint32_t flags, size_t bytes, - size_t old_bytes, uint32_t alignment); - -/** - * Similar to rballoc_align(), returns resized buffer aligned to - * PLATFORM_DCACHE_ALIGN. - */ -static inline void *rbrealloc(void *ptr, uint32_t flags, - size_t bytes, size_t old_bytes) -{ - return rbrealloc_align(ptr, flags, bytes, old_bytes, - PLATFORM_DCACHE_ALIGN); -} - /** * Frees the memory block. * @param ptr Pointer to the memory block. @@ -122,11 +99,29 @@ void rfree(void *ptr); */ void l3_heap_save(void); -void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, - size_t alignment); -void sof_heap_free(struct k_heap *heap, void *addr); +__syscall void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, + size_t alignment); + +void *z_impl_sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, + size_t alignment); + +__syscall void sof_heap_free(struct k_heap *heap, void *addr); + +void z_impl_sof_heap_free(struct k_heap *heap, void *addr); + struct k_heap *sof_sys_heap_get(void); +/** + * Returns heap object to use for SOF heap allocations + * for audio application code. 
+ * + * This should not be used for heap allocations for objects that + * are only used in SOF kernel space. + * + * Note: audio modules should use mod_alloc() instead! + */ +struct k_heap *sof_sys_user_heap_get(void); + /* TODO: remove - debug only - only needed for linking */ static inline void heap_trace_all(int force) {} @@ -149,4 +144,6 @@ size_t get_shared_buffer_heap_size(void); #endif +#include + #endif /* __ZEPHYR_RTOS_ALLOC_H__ */ diff --git a/zephyr/include/rtos/sof.h b/zephyr/include/rtos/sof.h index 1de60c5cd6ff..2c55d08a9d80 100644 --- a/zephyr/include/rtos/sof.h +++ b/zephyr/include/rtos/sof.h @@ -46,8 +46,10 @@ struct sof { int argc; char **argv; +#ifndef CONFIG_SOF_USERSPACE_LL /* ipc */ struct ipc *ipc; +#endif /* system agent */ struct sa *sa; diff --git a/zephyr/include/rtos/userspace_helper.h b/zephyr/include/rtos/userspace_helper.h index 29635fb942ad..6951d6dcdd43 100644 --- a/zephyr/include/rtos/userspace_helper.h +++ b/zephyr/include/rtos/userspace_helper.h @@ -26,6 +26,9 @@ #define APP_TASK_BSS K_APP_BMEM(common_partition) #define APP_TASK_DATA K_APP_DMEM(common_partition) +#define APP_SYSUSER_BSS K_APP_BMEM(sysuser_partition) +#define APP_SYSUSER_DATA K_APP_DMEM(sysuser_partition) + struct processing_module; struct userspace_context; @@ -137,4 +140,26 @@ static inline int user_access_to_mailbox(struct k_mem_domain *domain, k_tid_t th #endif /* CONFIG_USERSPACE */ +#ifdef CONFIG_SOF_USERSPACE_LL + +int user_memory_attach_system_user_partition(struct k_mem_domain *dom); + +#else + +/** + * Attach SOF system user memory partition to a memory domain. + * @param dom - memory domain to attach the common partition to. + * + * @return 0 for success, error otherwise. + * + * @note + * Function used only when CONFIG_USERSPACE is set. + * The common partition contains shared objects required by user-space modules. 
+ */ +static inline int user_memory_attach_system_user_partition(struct k_mem_domain *dom) { +	return 0; +} + +#endif /* CONFIG_SOF_USERSPACE_LL */ + #endif /* __ZEPHYR_LIB_USERSPACE_HELPER_H__ */ diff --git a/zephyr/include/sof/lib/cpu.h b/zephyr/include/sof/lib/cpu.h index c23405e85121..533cb29f3602 100644 --- a/zephyr/include/sof/lib/cpu.h +++ b/zephyr/include/sof/lib/cpu.h @@ -55,7 +55,11 @@ static inline bool cpu_is_primary(int id) static inline bool cpu_is_me(int id) { +#ifdef CONFIG_SOF_USERSPACE_LL + return true; +#else return id == cpu_get_id(); +#endif } int cpu_enable_core(int id); diff --git a/zephyr/include/sof/lib/dma.h b/zephyr/include/sof/lib/dma.h index b13f3c25221b..e85f4b4d2e80 100644 --- a/zephyr/include/sof/lib/dma.h +++ b/zephyr/include/sof/lib/dma.h @@ -34,6 +34,7 @@ struct comp_buffer; struct comp_dev; +struct k_heap; /** \addtogroup sof_dma_drivers DMA Drivers * SOF DMA Drivers API specification (deprecated interface, to be @@ -291,13 +292,14 @@ static inline void dma_sg_init(struct dma_sg_elem_array *ea) ea->elems = NULL; } -int dma_sg_alloc(struct dma_sg_elem_array *ea, +int dma_sg_alloc(struct k_heap *heap, + struct dma_sg_elem_array *ea, uint32_t flags, uint32_t direction, uint32_t buffer_count, uint32_t buffer_bytes, uintptr_t dma_buffer_addr, uintptr_t external_addr); -void dma_sg_free(struct dma_sg_elem_array *ea); +void dma_sg_free(struct k_heap *heap, struct dma_sg_elem_array *ea); /** * \brief Get the total size of SG buffer diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c index f95f66fbdb7f..578f30984106 100644 --- a/zephyr/lib/alloc.c +++ b/zephyr/lib/alloc.c @@ -13,6 +13,7 @@ #include #include #include +#include /* for zephyr_ll_user_heap() */ #include #include #include @@ -382,6 +383,16 @@ struct k_heap *sof_sys_heap_get(void) { return &sof_heap; } +struct k_heap *sof_sys_user_heap_get(void) +{ +#ifdef CONFIG_SOF_USERSPACE_LL + return zephyr_ll_user_heap(); +#else + /* let sof_heap_alloc() pick */ + return NULL; +#endif +} + static void
*heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes) { k_spinlock_key_t key; @@ -619,8 +630,8 @@ EXPORT_SYMBOL(rfree); * To match the fall-back SOF main heap all private heaps should also be in the * uncached address range. */ -void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, - size_t alignment) +void *z_impl_sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, + size_t alignment) { if (flags & (SOF_MEM_FLAG_LARGE_BUFFER | SOF_MEM_FLAG_USER_SHARED_BUFFER)) return rballoc_align(flags, bytes, alignment); @@ -634,7 +645,7 @@ void *sof_heap_alloc(struct k_heap *heap, uint32_t flags, size_t bytes, return (__sparse_force void *)heap_alloc_aligned_cached(heap, alignment, bytes); } -void sof_heap_free(struct k_heap *heap, void *addr) +void z_impl_sof_heap_free(struct k_heap *heap, void *addr) { if (heap && addr && is_heap_pointer(heap, addr)) heap_free(heap, addr); diff --git a/zephyr/lib/dma.c b/zephyr/lib/dma.c index 7452459f8b0a..0b8987ed65d0 100644 --- a/zephyr/lib/dma.c +++ b/zephyr/lib/dma.c @@ -15,12 +15,13 @@ #include #include #include +#include #include #define DW_DMA_BUFFER_PERIOD_COUNT 0x4 #define HDA_DMA_BUFFER_PERIOD_COUNT 4 -SHARED_DATA struct sof_dma dma[] = { +APP_TASK_DATA SHARED_DATA struct sof_dma dma[] = { #if DT_NODE_HAS_STATUS(DT_NODELABEL(lpgpdma0), okay) { /* Low Power GP DMAC 0 */ .plat_data = { diff --git a/zephyr/lib/userspace_helper.c b/zephyr/lib/userspace_helper.c index 8c4aef423e15..c7c361295269 100644 --- a/zephyr/lib/userspace_helper.c +++ b/zephyr/lib/userspace_helper.c @@ -36,6 +36,10 @@ LOG_MODULE_REGISTER(userspace_helper, CONFIG_SOF_LOG_LEVEL); K_APPMEM_PARTITION_DEFINE(common_partition); +#ifdef CONFIG_SOF_USERSPACE_LL +K_APPMEM_PARTITION_DEFINE(sysuser_partition); +#endif + struct k_heap *module_driver_heap_init(void) { struct k_heap *mod_drv_heap = rballoc(SOF_MEM_FLAG_USER, sizeof(*mod_drv_heap)); @@ -83,6 +87,10 @@ int user_memory_attach_common_partition(struct k_mem_domain 
*dom) } #ifdef CONFIG_SOF_USERSPACE_LL +int user_memory_attach_system_user_partition(struct k_mem_domain *dom) +{ + return k_mem_domain_add_partition(dom, &sysuser_partition); +} int user_access_to_mailbox(struct k_mem_domain *domain, k_tid_t thread_id) { diff --git a/zephyr/schedule.c b/zephyr/schedule.c index 75155b5d4913..94623b2ed5db 100644 --- a/zephyr/schedule.c +++ b/zephyr/schedule.c @@ -10,10 +10,11 @@ #include #include #include +#include #include #include -static struct schedulers *_schedulers[CONFIG_CORE_COUNT]; +static APP_TASK_BSS struct schedulers *_schedulers[CONFIG_CORE_COUNT]; /** * Retrieves registered schedulers. @@ -21,6 +22,11 @@ static struct schedulers *_schedulers[CONFIG_CORE_COUNT]; */ struct schedulers **arch_schedulers_get(void) { + if (k_is_user_context()) { + printk("FIXME: using core0 scheduler\n"); + return _schedulers; + } + return _schedulers + cpu_get_id(); } EXPORT_SYMBOL(arch_schedulers_get); diff --git a/zephyr/syscall/alloc.c b/zephyr/syscall/alloc.c new file mode 100644 index 000000000000..fad39865b9d2 --- /dev/null +++ b/zephyr/syscall/alloc.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2026 Intel Corporation. 
+ +#include +#include +#include + +static inline void *z_vrfy_sof_heap_alloc(struct k_heap *heap, uint32_t flags, + size_t bytes, size_t alignment) +{ + return z_impl_sof_heap_alloc(heap, flags, bytes, alignment); +} +#include + +static inline void z_vrfy_sof_heap_free(struct k_heap *heap, void *addr) +{ + z_impl_sof_heap_free(heap, addr); +} +#include diff --git a/zephyr/syscall/sof_dma.c b/zephyr/syscall/sof_dma.c index ed69ffc78423..11ee8156f7ff 100644 --- a/zephyr/syscall/sof_dma.c +++ b/zephyr/syscall/sof_dma.c @@ -119,9 +119,15 @@ static inline struct dma_block_config *deep_copy_dma_blk_cfg_list(struct dma_con for (user_next = cfg->head_block, kern_next = kern_cfg; user_next; - user_next = user_next->next_block, kern_next++) { - if (++i > cfg->block_count) - goto err; + user_next = user_next->next_block, kern_next++, i++) { + if (i == cfg->block_count) { + /* last block can point to first one */ + if (user_next != cfg->head_block) + goto err; + + kern_prev->next_block = kern_cfg; + break; + } if (k_usermode_from_copy(kern_next, user_next, sizeof(*kern_next))) goto err; diff --git a/zephyr/test/CMakeLists.txt b/zephyr/test/CMakeLists.txt index c5b66c83bbaa..b276bb307259 100644 --- a/zephyr/test/CMakeLists.txt +++ b/zephyr/test/CMakeLists.txt @@ -5,6 +5,9 @@ if(CONFIG_SOF_BOOT_TEST) zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace/ksem.c ) + if(CONFIG_USERSPACE AND CONFIG_SOF_USERSPACE_INTERFACE_ALLOC) + zephyr_library_sources(userspace/test_heap_alloc.c) + endif() endif() if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_INTERFACE_DMA) diff --git a/zephyr/test/userspace/test_heap_alloc.c b/zephyr/test/userspace/test_heap_alloc.c new file mode 100644 index 000000000000..8018e4b27b9d --- /dev/null +++ b/zephyr/test/userspace/test_heap_alloc.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2026 Intel Corporation. 
+ */ + +/* + * Test case for sof_heap_alloc() / sof_heap_free() use from a Zephyr + * user-space thread. + */ + +#include +#include +#include + +#include +#include +#include + +LOG_MODULE_DECLARE(sof_boot_test, LOG_LEVEL_DBG); + +#define USER_STACKSIZE 2048 + +static struct k_thread user_thread; +static K_THREAD_STACK_DEFINE(user_stack, USER_STACKSIZE); + +static void user_function(void *p1, void *p2, void *p3) +{ + struct k_heap *heap = (struct k_heap *)p1; + void *ptr; + + __ASSERT(k_is_user_context(), "isn't user"); + + LOG_INF("SOF thread %s (%s)", + k_is_user_context() ? "UserSpace!" : "privileged mode.", + CONFIG_BOARD_TARGET); + + /* allocate a block from the user heap */ + ptr = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, 128, 0); + zassert_not_null(ptr, "sof_heap_alloc returned NULL"); + + LOG_INF("sof_heap_alloc returned %p", ptr); + + /* free the block */ + sof_heap_free(heap, ptr); + + LOG_INF("sof_heap_free done"); +} + +static void test_user_thread_heap_alloc(void) +{ + struct k_heap *heap; + + heap = zephyr_ll_user_heap(); + zassert_not_null(heap, "user heap not found"); + + k_thread_create(&user_thread, user_stack, USER_STACKSIZE, + user_function, heap, NULL, NULL, + -1, K_USER, K_FOREVER); + + /* Add thread to LL memory domain so it can access the user heap */ + k_mem_domain_add_thread(zephyr_ll_mem_domain(), &user_thread); + + k_thread_start(&user_thread); + k_thread_join(&user_thread, K_FOREVER); +} + +ZTEST(sof_boot, user_space_heap_alloc) +{ + test_user_thread_heap_alloc(); + + ztest_test_pass(); +} diff --git a/zephyr/test/userspace/test_ll_task.c b/zephyr/test/userspace/test_ll_task.c index 234423defc60..4bef2a953f61 100644 --- a/zephyr/test/userspace/test_ll_task.c +++ b/zephyr/test/userspace/test_ll_task.c @@ -14,9 +14,20 @@ #include #include #include +#include +#include +#include +#include #include #include #include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -24,6 +35,7 @@ #include #include /* 
offsetof() */ +#include LOG_MODULE_DECLARE(sof_boot_test, LOG_LEVEL_DBG); @@ -36,10 +48,15 @@ K_APPMEM_PARTITION_DEFINE(userspace_ll_part); /* Global variable for test runs counter, accessible from user-space */ K_APP_BMEM(userspace_ll_part) static int test_runs; +/* User-space thread for pipeline_two_components test */ +#define PPL_USER_STACKSIZE 4096 + +static struct k_thread ppl_user_thread; +static K_THREAD_STACK_DEFINE(ppl_user_stack, PPL_USER_STACKSIZE); + static enum task_state task_callback(void *arg) { LOG_INF("entry"); - if (++test_runs > 3) return SOF_TASK_STATE_COMPLETED; @@ -77,7 +94,7 @@ static void ll_task_test(void) LOG_INF("task scheduled and running"); /* Let the task run for a bit */ - k_sleep(K_MSEC(10)); + k_sleep(K_MSEC(100)); /* Cancel the task to stop any scheduled execution */ ret = schedule_task_cancel(task); @@ -87,6 +104,9 @@ static void ll_task_test(void) ret = schedule_task_free(task); zassert_equal(ret, 0); + k_mem_domain_remove_partition(zephyr_ll_mem_domain(), &userspace_ll_part); + zephyr_ll_task_free(task); + LOG_INF("test complete"); } @@ -129,6 +149,485 @@ ZTEST(userspace_ll, pipeline_check) pipeline_check(); } +/** + * Test creating a pipeline via IPC4 GLB_CREATE_PIPELINE message. + * + * Unlike pipeline_check() which calls pipeline_new() directly, + * this test constructs an ipc4_pipeline_create message and sends + * it through ipc_cmd(), exercising the full IPC4 command dispatch + * path: ipc_cmd() -> ipc4_process_glb_message() -> + * ipc_user_forward_cmd() (userspace) or ipc4_new_pipeline(). 
+ */ +static void ipc4_create_pipeline_check(void) +{ + struct ipc4_pipeline_create pipe_desc = {0}; + struct ipc *ipc = ipc_get(); + struct ipc_cmd_hdr *hdr; + struct ipc_comp_dev *ipc_pipe; + uint32_t pipeline_id = 10; + uint32_t priority = 3; + int ret; + + /* Construct IPC4 CREATE_PIPELINE message */ + pipe_desc.primary.r.type = SOF_IPC4_GLB_CREATE_PIPELINE; + pipe_desc.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_FW_GEN_MSG; + pipe_desc.primary.r.rsp = SOF_IPC4_MESSAGE_DIR_MSG_REQUEST; + pipe_desc.primary.r.instance_id = pipeline_id; + pipe_desc.primary.r.ppl_priority = priority; + pipe_desc.primary.r.ppl_mem_size = 0; + + pipe_desc.extension.r.core_id = 0; + pipe_desc.extension.r.lp = 0; + pipe_desc.extension.r.payload = 0; + + /* + * Populate handler.c's internal IPC message buffer. + * ipc_compact_read_msg() returns a pointer to the static + * msg_data.msg_in used by ipc_cmd() via ipc4_get_message_request(). + * Overwriting through this pointer sets up the message for dispatch. + */ + hdr = ipc_compact_read_msg(); + hdr->pri = pipe_desc.primary.dat; + hdr->ext = pipe_desc.extension.dat; + + /* Send through the full IPC command dispatch path */ + ipc_cmd(hdr); + + LOG_INF("ipc_cmd() returned for pipeline id=%u", pipeline_id); + + /* Verify pipeline is registered in IPC component list */ + ipc_pipe = ipc_get_pipeline_by_id(ipc, pipeline_id); + zassert_not_null(ipc_pipe, "pipeline not found in IPC comp list"); + zassert_equal(ipc_pipe->type, COMP_TYPE_PIPELINE, "wrong comp type"); + zassert_equal(ipc_pipe->id, pipeline_id, "pipeline id mismatch"); + zassert_not_null(ipc_pipe->pipeline, "pipeline struct is NULL"); + zassert_equal(ipc_pipe->pipeline->pipeline_id, pipeline_id, + "pipeline->pipeline_id mismatch"); + zassert_equal(ipc_pipe->pipeline->priority, priority, + "pipeline priority mismatch"); + zassert_equal(ipc_pipe->pipeline->time_domain, SOF_TIME_DOMAIN_TIMER, + "time_domain not set"); + + LOG_INF("pipeline verified in IPC comp list"); + + /* Clean up 
through IPC free path */
+	ret = ipc_pipeline_free(ipc, pipeline_id);
+	zassert_equal(ret, 0, "ipc_pipeline_free failed: %d", ret);
+
+	/* Verify pipeline is removed from IPC component list */
+	ipc_pipe = ipc_get_pipeline_by_id(ipc, pipeline_id);
+	zassert_is_null(ipc_pipe, "pipeline still in IPC comp list after free");
+
+	LOG_INF("ipc4 create pipeline test complete");
+}
+
+ZTEST(userspace_ll, ipc4_create_pipeline_check)
+{
+	ipc4_create_pipeline_check();
+}
+
+/* Copier UUID: 9ba00c83-ca12-4a83-943c-1fa2e82f9dda */
+static const uint8_t copier_uuid[16] = {
+	0x83, 0x0c, 0xa0, 0x9b, 0x12, 0xca, 0x83, 0x4a,
+	0x94, 0x3c, 0x1f, 0xa2, 0xe8, 0x2f, 0x9d, 0xda
+};
+
+/**
+ * Find the module_id (manifest entry index) for the copier module
+ * by iterating the firmware manifest and matching the copier UUID.
+ */
+static int find_copier_module_id(void)
+{
+	const struct sof_man_fw_desc *desc = basefw_vendor_get_manifest();
+	const struct sof_man_module *mod;
+	uint32_t i;
+
+	if (!desc)
+		return -1;
+
+	for (i = 0; i < desc->header.num_module_entries; i++) {
+		mod = (const struct sof_man_module *)((const char *)desc +
+			SOF_MAN_MODULE_OFFSET(i));
+		if (!memcmp(&mod->uuid, copier_uuid, sizeof(copier_uuid)))
+			return (int)i;
+	}
+
+	return -1;
+}
+
+/**
+ * IPC4 copier module config - used as payload for comp_new_ipc4().
+ * Placed at MAILBOX_HOSTBOX_BASE before calling comp_new_ipc4().
+ * Layout matches struct ipc4_copier_module_cfg from copier.h.
+ */ +struct copier_init_data { + struct ipc4_base_module_cfg base; + struct ipc4_audio_format out_fmt; + uint32_t copier_feature_mask; + /* Gateway config (matches struct ipc4_copier_gateway_cfg) */ + union ipc4_connector_node_id node_id; + uint32_t dma_buffer_size; + uint32_t config_length; +} __packed __aligned(4); + +static void fill_audio_format(struct ipc4_audio_format *fmt) +{ + memset(fmt, 0, sizeof(*fmt)); + fmt->sampling_frequency = IPC4_FS_48000HZ; + fmt->depth = IPC4_DEPTH_32BIT; + fmt->ch_cfg = IPC4_CHANNEL_CONFIG_STEREO; + fmt->channels_count = 2; + fmt->valid_bit_depth = 32; + fmt->s_type = IPC4_TYPE_MSB_INTEGER; + fmt->interleaving_style = IPC4_CHANNELS_INTERLEAVED; +} + +/** + * Create a copier component via IPC4. + * + * @param module_id Copier module_id from manifest + * @param instance_id Instance ID for this component + * @param pipeline_id Parent pipeline ID + * @param node_id Gateway node ID (type + virtual DMA index) + */ +static struct comp_dev *create_copier(int module_id, int instance_id, + int pipeline_id, + union ipc4_connector_node_id node_id) +{ + struct ipc4_module_init_instance module_init; + struct copier_init_data cfg; + struct comp_dev *dev; + + /* Prepare copier config payload */ + memset(&cfg, 0, sizeof(cfg)); + fill_audio_format(&cfg.base.audio_fmt); + /* 2 channels * 4 bytes * 48 frames = 384 bytes */ + cfg.base.ibs = 384; + cfg.base.obs = 384; + cfg.base.is_pages = 0; + cfg.base.cpc = 0; + cfg.out_fmt = cfg.base.audio_fmt; + cfg.copier_feature_mask = 0; + cfg.node_id = node_id; + cfg.dma_buffer_size = 768; + cfg.config_length = 0; + + /* Write config data to mailbox hostbox (where comp_new_ipc4 reads it). + * Flush cache so that data is visible in SRAM before comp_new_ipc4() + * invalidates the cache line (in normal IPC flow, host writes via DMA + * directly to SRAM, so the invalidation reads fresh data; here the DSP + * core itself writes, so an explicit flush is needed). 
+ */
+	memcpy((void *)MAILBOX_HOSTBOX_BASE, &cfg, sizeof(cfg));
+	sys_cache_data_flush_range((void *)MAILBOX_HOSTBOX_BASE, sizeof(cfg));
+
+	/* Prepare IPC4 module init header */
+	memset(&module_init, 0, sizeof(module_init));
+	module_init.primary.r.module_id = module_id;
+	module_init.primary.r.instance_id = instance_id;
+	module_init.primary.r.type = SOF_IPC4_MOD_INIT_INSTANCE;
+	module_init.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_MODULE_MSG;
+	module_init.primary.r.rsp = SOF_IPC4_MESSAGE_DIR_MSG_REQUEST;
+
+	module_init.extension.r.param_block_size = sizeof(cfg) / sizeof(uint32_t);
+	module_init.extension.r.ppl_instance_id = pipeline_id;
+	module_init.extension.r.core_id = 0;
+	module_init.extension.r.proc_domain = 0; /* LL */
+
+	dev = comp_new_ipc4(&module_init);
+
+	/*
+	 * We use the IPC code to create the components. This code runs
+	 * in kernel space, so we need to separately assign the created
+	 * components to the user LL and IPC threads before they can be used.
+	 */
+	comp_grant_access_to_thread(dev, &ppl_user_thread);
+
+	return dev;
+}
+
+/**
+ * Context shared between kernel setup and the user-space pipeline thread.
+ */
+struct ppl_test_ctx {
+	struct pipeline *p;
+	struct k_heap *heap;
+	struct comp_dev *host_comp;
+	struct comp_dev *dai_comp;
+	struct comp_buffer *buf;
+	struct ipc *ipc;
+	struct ipc_comp_dev *ipc_pipe;
+};
+
+/**
+ * Pipeline operations: connect, complete, prepare, copy, verify, and clean up.
+ * This function is called either directly (kernel mode) or from a user-space
+ * thread, exercising pipeline_*() calls from the requested context.
+ */ +static void pipeline_ops(struct ppl_test_ctx *ctx) +{ + struct pipeline *p = ctx->p; + struct comp_dev *host_comp = ctx->host_comp; + struct comp_dev *dai_comp = ctx->dai_comp; + struct comp_buffer *buf = ctx->buf; + int ret; + + LOG_INF("pipeline_ops: user_context=%d", k_is_user_context()); + + /* Step: Connect host -> buffer -> DAI */ + ret = pipeline_connect(host_comp, buf, PPL_CONN_DIR_COMP_TO_BUFFER); + zassert_equal(ret, 0, "connect host to buffer failed"); + + ret = pipeline_connect(dai_comp, buf, PPL_CONN_DIR_BUFFER_TO_COMP); + zassert_equal(ret, 0, "connect buffer to DAI failed"); + + LOG_INF("host -> buffer -> DAI connected"); + + /* Step: Complete the pipeline */ + ret = pipeline_complete(p, host_comp, dai_comp); + zassert_equal(ret, 0, "pipeline complete failed"); + + /* Step: Prepare the pipeline */ + p->sched_comp = host_comp; + + ret = pipeline_prepare(p, host_comp); + zassert_equal(ret, 0, "pipeline prepare failed"); + + ret = pipeline_trigger(p, host_comp, COMP_TRIGGER_PRE_START); + //zassert_equal(ret, 0, "pipeline TRIGGER_START failed"); + + LOG_INF("pipeline complete, status = %d", p->status); + + /* Step: Run copies */ + pipeline_schedule_copy(p, 1000); + + /* Step: let run for 3 msec */ + k_sleep(K_MSEC(3)); + + /* Verify pipeline source and sink assignments */ + zassert_equal(p->source_comp, host_comp, "source comp mismatch"); + zassert_equal(p->sink_comp, dai_comp, "sink comp mismatch"); + + LOG_INF("pipeline_ops done"); +} + +/** + * User-space thread entry point for pipeline_two_components test. + * p1 points to the ppl_test_ctx shared with the kernel launcher. + */ +static void pipeline_user_thread(void *p1, void *p2, void *p3) +{ + struct ppl_test_ctx *ctx = (struct ppl_test_ctx *)p1; + + zassert_true(k_is_user_context(), "expected user context"); + pipeline_ops(ctx); +} + +/** + * Test creating a pipeline with a host copier and a DAI (link) copier, + * connected through a shared buffer. 
+ * + * When run_in_user is true, all pipeline_*() calls are made from a + * separate user-space thread. + */ +static void pipeline_two_components(bool run_in_user) +{ + struct ppl_test_ctx *ctx; + struct k_heap *heap = NULL; + uint32_t pipeline_id = 2; + uint32_t priority = 0; + struct task *task; + uint32_t comp_id; + int copier_module_id; + int host_instance_id = 0; + int dai_instance_id = 1; + int core = 0; + int ret; + + /* Step: Find the copier module_id from the firmware manifest */ + copier_module_id = find_copier_module_id(); + zassert_true(copier_module_id >= 0, "copier module not found in manifest"); + LOG_INF("copier module_id = %d", copier_module_id); + + /* Step: Create pipeline */ + if (run_in_user) { + LOG_INF("running test with user memory domain"); + heap = zephyr_ll_user_heap(); + zassert_not_null(heap, "user heap not found"); + + task = zephyr_ll_task_alloc(); + zassert_not_null(task, "task allocation failed"); + } else { + task = sof_heap_alloc(NULL, SOF_MEM_FLAG_USER, sizeof(struct task), sizeof(void *)); + LOG_INF("running test with kernel memory domain"); + } + + ctx = sof_heap_alloc(heap, SOF_MEM_FLAG_USER, sizeof(*ctx), 0); + ctx->heap = heap; + ctx->ipc = ipc_get(); + + comp_id = IPC4_COMP_ID(copier_module_id, host_instance_id); + ctx->p = pipeline_new(ctx->heap, pipeline_id, priority, comp_id, NULL); + zassert_not_null(ctx->p, "pipeline creation failed"); + + /* create the LL scheduler thread by initializing one task */ + k_mem_domain_add_partition(zephyr_ll_mem_domain(), &userspace_ll_part); + + test_runs = 0; + ret = schedule_task_init_ll(task, SOF_UUID(test_task_uuid), SOF_SCHEDULE_LL_TIMER, + priority, task_callback, + (void *)&test_runs, core, 0); + zassert_equal(ret, 0); + + LOG_INF("task init done"); + + /* Set pipeline period so components get correct dev->period and dev->frames. + * This mirrors what ipc4_create_pipeline() does in normal IPC flow. 
+ */ + ctx->p->time_domain = SOF_TIME_DOMAIN_TIMER; + ctx->p->period = LL_TIMER_PERIOD_US; + + /* Register pipeline in IPC component list so comp_new_ipc4() can + * find it via ipc_get_comp_by_ppl_id() and set dev->period. + */ + ctx->ipc_pipe = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + sizeof(struct ipc_comp_dev)); + zassert_not_null(ctx->ipc_pipe, "ipc_comp_dev alloc failed"); + ctx->ipc_pipe->pipeline = ctx->p; + ctx->ipc_pipe->type = COMP_TYPE_PIPELINE; + ctx->ipc_pipe->id = pipeline_id; + ctx->ipc_pipe->core = 0; + list_item_append(&ctx->ipc_pipe->list, &ctx->ipc->comp_list); + + /* Step: Create host copier with HDA host output gateway */ + union ipc4_connector_node_id host_node_id = { .f = { + .dma_type = ipc4_hda_host_output_class, + .v_index = 0 + }}; + ctx->host_comp = create_copier(copier_module_id, host_instance_id, pipeline_id, + host_node_id); + zassert_not_null(ctx->host_comp, "host copier creation failed"); + + /* Assign pipeline to host component */ + ctx->host_comp->pipeline = ctx->p; + ctx->host_comp->ipc_config.type = SOF_COMP_HOST; + + LOG_INF("host copier created, comp_id = 0x%x", ctx->host_comp->ipc_config.id); + + /* Step: Create link copier with HDA link output gateway */ + union ipc4_connector_node_id link_node_id = { .f = { + .dma_type = ipc4_hda_link_output_class, + .v_index = 0 + }}; + ctx->dai_comp = create_copier(copier_module_id, dai_instance_id, pipeline_id, + link_node_id); + zassert_not_null(ctx->dai_comp, "DAI copier creation failed"); + + /* Assign pipeline to DAI component */ + ctx->dai_comp->pipeline = ctx->p; + ctx->dai_comp->ipc_config.type = SOF_COMP_DAI; + + LOG_INF("DAI copier created, comp_id = 0x%x", ctx->dai_comp->ipc_config.id); + + /* Step: Allocate a buffer to connect host -> DAI */ + ctx->buf = buffer_alloc(ctx->heap, 384, 0, 0, false); + zassert_not_null(ctx->buf, "buffer allocation failed"); + + if (run_in_user) { + struct k_thread *task_thread; + + /* Create a user-space thread to execute pipeline 
operations */ + k_thread_create(&ppl_user_thread, ppl_user_stack, PPL_USER_STACKSIZE, + pipeline_user_thread, ctx, NULL, NULL, + -1, K_USER, K_FOREVER); + + /* Add thread to LL memory domain so it can access pipeline memory */ + k_mem_domain_add_thread(zephyr_ll_mem_domain(), &ppl_user_thread); + + user_grant_dai_access_all(&ppl_user_thread); + user_grant_dma_access_all(&ppl_user_thread); + user_access_to_mailbox(zephyr_ll_mem_domain(), &ppl_user_thread); + zephyr_ll_grant_access(&ppl_user_thread); + + task_thread = scheduler_init_context(task); + zassert_not_null(task_thread); + + /* + * A hack for testing purposes, normally DAI module + * is created in user-space so it gets access + * automatically. Until that works, use dai_dd directly. + */ + struct dai_data *dai_dd = comp_get_drvdata(ctx->dai_comp); + struct k_mutex *dai_lock = dai_dd->dai->lock; + LOG_INF("dai_lock mutex %p", dai_lock); + k_thread_access_grant(task_thread, dai_lock); + k_thread_access_grant(&ppl_user_thread, dai_lock); + comp_grant_access_to_thread(ctx->dai_comp, task_thread); + comp_grant_access_to_thread(ctx->host_comp, task_thread); + + k_thread_start(&ppl_user_thread); + + LOG_INF("user thread started, waiting for completion"); + + k_thread_join(&ppl_user_thread, K_FOREVER); + } else { + /* Run pipeline operations directly in kernel context */ + pipeline_ops(ctx); + } + + /* Step: Clean up - reset, disconnect, free buffer, free components, free pipeline */ + /* Reset pipeline to bring components back to COMP_STATE_READY, + * required before ipc_comp_free() which rejects non-READY components. 
+ */ + ret = pipeline_reset(ctx->p, ctx->host_comp); + zassert_equal(ret, 0, "pipeline reset failed"); + + pipeline_disconnect(ctx->host_comp, ctx->buf, PPL_CONN_DIR_COMP_TO_BUFFER); + pipeline_disconnect(ctx->dai_comp, ctx->buf, PPL_CONN_DIR_BUFFER_TO_COMP); + + buffer_free(ctx->buf); + + /* Free components through IPC to properly remove from IPC device list */ + ret = ipc_comp_free(ctx->ipc, ctx->host_comp->ipc_config.id); + zassert_equal(ret, 0, "host comp free failed"); + + ret = ipc_comp_free(ctx->ipc, ctx->dai_comp->ipc_config.id); + zassert_equal(ret, 0, "DAI comp free failed"); + + /* Unregister pipeline from IPC component list */ + list_item_del(&ctx->ipc_pipe->list); + rfree(ctx->ipc_pipe); + + ret = pipeline_free(ctx->p); + zassert_equal(ret, 0, "pipeline free failed"); + + scheduler_free_context(); + + ret = schedule_task_free(task); + zassert_equal(ret, 0); + + sof_heap_free(heap, ctx); + + if (run_in_user) { + zephyr_ll_task_free(task); + k_mem_domain_remove_partition(zephyr_ll_mem_domain(), &userspace_ll_part); + } else { + sof_heap_free(NULL, task); + } + + LOG_INF("two component pipeline test complete"); +} + +ZTEST(userspace_ll, pipeline_two_components_kernel) +{ + pipeline_two_components(false); +} + +ZTEST(userspace_ll, pipeline_two_components_user) +{ + pipeline_two_components(true); +} + ZTEST_SUITE(userspace_ll, NULL, NULL, NULL, NULL, NULL); /** diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index 2e302f22c94b..f814c7c52beb 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -265,7 +265,11 @@ void platform_dai_timestamp(struct comp_dev *dai, /* get current wallclock for componnent */ void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock) { +#ifndef CONFIG_SOF_USERSPACE_LL *wallclock = sof_cycle_get_64(); +#else + *wallclock = k_uptime_get(); +#endif } /* @@ -333,7 +337,10 @@ void k_sys_fatal_error_handler(unsigned int reason, /* flush and switch to immediate mode */ LOG_PANIC(); + /* IPC not set up in standalone test 
mode */ +#ifndef CONFIG_SOF_BOOT_TEST_STANDALONE ipc_send_panic_notification(); +#endif #if defined(CONFIG_ARCH_POSIX) || defined(CONFIG_ZEPHYR_POSIX) LOG_ERR("Halting emulation");