#define CAPTURE_MIN_PERIOD_SIZE		320
#define BUFFER_BYTES_MAX	(PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE)
#define BUFFER_BYTES_MIN	(PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE)
/* Fragment limits for compressed (offload) playback; their product sizes
 * the DMA buffer allocated in q6apm_dai_compr_open().
 */
#define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE	(128 * 1024)
#define COMPR_PLAYBACK_MAX_NUM_FRAGMENTS	(16 * 4)
#define SID_MASK_DEFAULT	0xF

3335enum stream_state {
@@ -55,6 +57,7 @@ struct q6apm_dai_rtd {
5557 enum stream_state state ;
5658 struct q6apm_graph * graph ;
5759 spinlock_t lock ;
60+ bool notify_on_drain ;
5861};
5962
6063struct q6apm_dai_data {
@@ -132,6 +135,69 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo
132135 }
133136}
134137
138+ static void event_handler_compr (uint32_t opcode , uint32_t token ,
139+ uint32_t * payload , void * priv )
140+ {
141+ struct q6apm_dai_rtd * prtd = priv ;
142+ struct snd_compr_stream * substream = prtd -> cstream ;
143+ unsigned long flags ;
144+ uint32_t wflags = 0 ;
145+ uint64_t avail ;
146+ uint32_t bytes_written , bytes_to_write ;
147+ bool is_last_buffer = false;
148+
149+ switch (opcode ) {
150+ case APM_CLIENT_EVENT_CMD_EOS_DONE :
151+ spin_lock_irqsave (& prtd -> lock , flags );
152+ if (prtd -> notify_on_drain ) {
153+ snd_compr_drain_notify (prtd -> cstream );
154+ prtd -> notify_on_drain = false;
155+ } else {
156+ prtd -> state = Q6APM_STREAM_STOPPED ;
157+ }
158+ spin_unlock_irqrestore (& prtd -> lock , flags );
159+ break ;
160+ case APM_CLIENT_EVENT_DATA_WRITE_DONE :
161+ spin_lock_irqsave (& prtd -> lock , flags );
162+ bytes_written = token >> APM_WRITE_TOKEN_LEN_SHIFT ;
163+ prtd -> copied_total += bytes_written ;
164+ snd_compr_fragment_elapsed (substream );
165+
166+ if (prtd -> state != Q6APM_STREAM_RUNNING ) {
167+ spin_unlock_irqrestore (& prtd -> lock , flags );
168+ break ;
169+ }
170+
171+ avail = prtd -> bytes_received - prtd -> bytes_sent ;
172+
173+ if (avail > prtd -> pcm_count ) {
174+ bytes_to_write = prtd -> pcm_count ;
175+ } else {
176+ if (substream -> partial_drain || prtd -> notify_on_drain )
177+ is_last_buffer = true;
178+ bytes_to_write = avail ;
179+ }
180+
181+ if (bytes_to_write ) {
182+ if (substream -> partial_drain && is_last_buffer )
183+ wflags |= APM_LAST_BUFFER_FLAG ;
184+
185+ q6apm_write_async (prtd -> graph ,
186+ bytes_to_write , 0 , 0 , wflags );
187+
188+ prtd -> bytes_sent += bytes_to_write ;
189+
190+ if (prtd -> notify_on_drain && is_last_buffer )
191+ audioreach_shared_memory_send_eos (prtd -> graph );
192+ }
193+
194+ spin_unlock_irqrestore (& prtd -> lock , flags );
195+ break ;
196+ default :
197+ break ;
198+ }
199+ }
200+
135201static int q6apm_dai_prepare (struct snd_soc_component * component ,
136202 struct snd_pcm_substream * substream )
137203{
@@ -387,6 +453,75 @@ static int q6apm_dai_pcm_new(struct snd_soc_component *component, struct snd_soc
387453 return snd_pcm_set_fixed_buffer_all (rtd -> pcm , SNDRV_DMA_TYPE_DEV , component -> dev , size );
388454}
389455
456+ static int q6apm_dai_compr_open (struct snd_soc_component * component ,
457+ struct snd_compr_stream * stream )
458+ {
459+ struct snd_soc_pcm_runtime * rtd = stream -> private_data ;
460+ struct snd_soc_dai * cpu_dai = asoc_rtd_to_cpu (rtd , 0 );
461+ struct snd_compr_runtime * runtime = stream -> runtime ;
462+ struct q6apm_dai_rtd * prtd ;
463+ struct q6apm_dai_data * pdata ;
464+ struct device * dev = component -> dev ;
465+ int ret , size ;
466+ int graph_id ;
467+
468+ graph_id = cpu_dai -> driver -> id ;
469+ pdata = snd_soc_component_get_drvdata (component );
470+ if (!pdata )
471+ return - EINVAL ;
472+
473+ prtd = kzalloc (sizeof (* prtd ), GFP_KERNEL );
474+ if (prtd == NULL )
475+ return - ENOMEM ;
476+
477+ prtd -> cstream = stream ;
478+ prtd -> graph = q6apm_graph_open (dev , (q6apm_cb )event_handler_compr , prtd , graph_id );
479+ if (IS_ERR (prtd -> graph )) {
480+ ret = PTR_ERR (prtd -> graph );
481+ kfree (prtd );
482+ return ret ;
483+ }
484+
485+ runtime -> private_data = prtd ;
486+ runtime -> dma_bytes = BUFFER_BYTES_MAX ;
487+ size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * COMPR_PLAYBACK_MAX_NUM_FRAGMENTS ;
488+ ret = snd_dma_alloc_pages (SNDRV_DMA_TYPE_DEV , dev , size , & prtd -> dma_buffer );
489+ if (ret )
490+ return ret ;
491+
492+ if (pdata -> sid < 0 )
493+ prtd -> phys = prtd -> dma_buffer .addr ;
494+ else
495+ prtd -> phys = prtd -> dma_buffer .addr | (pdata -> sid << 32 );
496+
497+ snd_compr_set_runtime_buffer (stream , & prtd -> dma_buffer );
498+ spin_lock_init (& prtd -> lock );
499+
500+ q6apm_enable_compress_module (dev , prtd -> graph , true);
501+ return 0 ;
502+ }
503+
504+ static int q6apm_dai_compr_free (struct snd_soc_component * component ,
505+ struct snd_compr_stream * stream )
506+ {
507+ struct snd_compr_runtime * runtime = stream -> runtime ;
508+ struct q6apm_dai_rtd * prtd = runtime -> private_data ;
509+
510+ q6apm_graph_stop (prtd -> graph );
511+ q6apm_unmap_memory_regions (prtd -> graph , SNDRV_PCM_STREAM_PLAYBACK );
512+ q6apm_graph_close (prtd -> graph );
513+ snd_dma_free_pages (& prtd -> dma_buffer );
514+ prtd -> graph = NULL ;
515+ kfree (prtd );
516+ runtime -> private_data = NULL ;
517+
518+ return 0 ;
519+ }
520+ static const struct snd_compress_ops q6apm_dai_compress_ops = {
521+ .open = q6apm_dai_compr_open ,
522+ .free = q6apm_dai_compr_free ,
523+ };
524+
390525static const struct snd_soc_component_driver q6apm_fe_dai_component = {
391526 .name = DRV_NAME ,
392527 .open = q6apm_dai_open ,
@@ -396,6 +531,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
396531 .hw_params = q6apm_dai_hw_params ,
397532 .pointer = q6apm_dai_pointer ,
398533 .trigger = q6apm_dai_trigger ,
534+ .compress_ops = & q6apm_dai_compress_ops ,
399535};
400536
401537static int q6apm_dai_probe (struct platform_device * pdev )
0 commit comments