Diffstat (limited to 'Documentation/sound')
-rw-r--r--   Documentation/sound/alsa/ALSA-Configuration.txt      6
-rw-r--r--   Documentation/sound/alsa/Audiophile-Usb.txt           2
-rw-r--r--   Documentation/sound/alsa/CMIPCI.txt                   2
-rw-r--r--   Documentation/sound/alsa/HD-Audio-Models.txt         58
-rw-r--r--   Documentation/sound/alsa/HD-Audio.txt                 2
-rw-r--r--   Documentation/sound/alsa/README.maya44                2
-rw-r--r--   Documentation/sound/alsa/compress_offload.txt        10
-rw-r--r--   Documentation/sound/alsa/soc/DPCM.txt                380
-rw-r--r--   Documentation/sound/alsa/soc/codec.txt                46
-rw-r--r--   Documentation/sound/alsa/soc/dapm.txt                 73
-rw-r--r--   Documentation/sound/alsa/soc/machine.txt               6
-rw-r--r--   Documentation/sound/alsa/soc/overview.txt             27
-rw-r--r--   Documentation/sound/alsa/soc/platform.txt             19
-rw-r--r--   Documentation/sound/oss/vwsnd                        293
14 files changed, 525 insertions, 401 deletions
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index 95731a08f25..7ccf933bfbe 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt @@ -616,7 +616,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. As default, snd-dummy drivers doesn't allocate the real buffers but either ignores read/write or mmap a single dummy page to all - buffer pages, in order to save the resouces. If your apps need + buffer pages, in order to save the resources. If your apps need the read/ written buffer data to be consistent, pass fake_buffer=0 option. @@ -948,7 +948,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. avoided as much as possible... MORE NOTES ON "azx_get_response timeout" PROBLEMS: - On some hardwares, you may need to add a proper probe_mask option + On some hardware, you may need to add a proper probe_mask option to avoid the "azx_get_response timeout" problem above, instead. This occurs when the access to non-existing or non-working codec slot (likely a modem one) causes a stall of the communication via HD-audio @@ -1124,7 +1124,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. buggy_irq - Enable workaround for buggy interrupts on some motherboards (default yes on nForce chips, otherwise off) - buggy_semaphore - Enable workaround for hardwares with buggy + buggy_semaphore - Enable workaround for hardware with buggy semaphores (e.g. on some ASUS laptops) (default off) spdif_aclink - Use S/PDIF over AC-link instead of direct connection diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/alsa/Audiophile-Usb.txt index 654dd3b694a..e7a5ed4dcae 100644 --- a/Documentation/sound/alsa/Audiophile-Usb.txt +++ b/Documentation/sound/alsa/Audiophile-Usb.txt @@ -232,7 +232,7 @@ The parameter can be given: # modprobe snd-usb-audio index=1 device_setup=0x09 * Or while configuring the modules options in your modules configuration file - (tipically a .conf file in /etc/modprobe.d/ directory: + (typically a .conf file in /etc/modprobe.d/ directory: alias snd-card-1 snd-usb-audio options snd-usb-audio index=1 device_setup=0x09 diff --git a/Documentation/sound/alsa/CMIPCI.txt b/Documentation/sound/alsa/CMIPCI.txt index 16935c8561f..4e36e6e809c 100644 --- a/Documentation/sound/alsa/CMIPCI.txt +++ b/Documentation/sound/alsa/CMIPCI.txt @@ -87,7 +87,7 @@ with 4 channels, and use the interleaved 4 channel data. -There are some control switchs affecting to the speaker connections: +There are some control switches affecting to the speaker connections: "Line-In Mode" - an enum control to change the behavior of line-in jack. 
Either "Line-In", "Rear Output" or "Bass Output" can diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index bb8b0dc532b..d1ab5e17eb1 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -21,38 +21,42 @@ ALC267/268 ========== inv-dmic Inverted internal mic workaround -ALC269/270/275/276/280/282 +ALC269/270/275/276/28x/29x ====== - laptop-amic Laptops with analog-mic input - laptop-dmic Laptops with digital-mic input - alc269-dmic Enable ALC269(VA) digital mic workaround - alc271-dmic Enable ALC271X digital mic workaround - inv-dmic Inverted internal mic workaround - lenovo-dock Enables docking station I/O for some Lenovos - -ALC662/663/272 + laptop-amic Laptops with analog-mic input + laptop-dmic Laptops with digital-mic input + alc269-dmic Enable ALC269(VA) digital mic workaround + alc271-dmic Enable ALC271X digital mic workaround + inv-dmic Inverted internal mic workaround + headset-mic Indicates a combined headset (headphone+mic) jack + lenovo-dock Enables docking station I/O for some Lenovos + dell-headset-multi Headset jack, which can also be used as mic-in + dell-headset-dock Headset jack (without mic-in), and also dock I/O + +ALC66x/67x/892 ============== - mario Chromebook mario model fixup - asus-mode1 ASUS - asus-mode2 ASUS - asus-mode3 ASUS - asus-mode4 ASUS - asus-mode5 ASUS - asus-mode6 ASUS - asus-mode7 ASUS - asus-mode8 ASUS - inv-dmic Inverted internal mic workaround + mario Chromebook mario model fixup + asus-mode1 ASUS + asus-mode2 ASUS + asus-mode3 ASUS + asus-mode4 ASUS + asus-mode5 ASUS + asus-mode6 ASUS + asus-mode7 ASUS + asus-mode8 ASUS + inv-dmic Inverted internal mic workaround + dell-headset-multi Headset jack, which can also be used as mic-in ALC680 ====== N/A -ALC882/883/885/888/889 +ALC88x/898/1150 ====================== acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G acer-aspire-8930g Acer Aspire 8330G/6935G acer-aspire Acer Aspire others - inv-dmic Inverted internal mic workaround + inv-dmic Inverted internal mic workaround no-primary-hp VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC) ALC861/660 @@ -241,6 +245,7 @@ STAC9227/9228/9229/927x 5stack-no-fp D965 5stack without front panel dell-3stack Dell Dimension E520 dell-bios Fixes with Dell BIOS setup + dell-bios-amic Fixes with Dell BIOS setup including analog mic volknob Fixes with volume-knob widget 0x24 auto BIOS setup (default) @@ -281,6 +286,11 @@ STAC92HD83* hp-inv-led HP with broken BIOS for inverted mute LED auto BIOS setup (default) +STAC92HD95 +========== + hp-led LED support for HP laptops + hp-bass Bass HPF setup for HP Spectre 13 + STAC9872 ======== vaio VAIO laptop without SPDIF @@ -292,6 +302,12 @@ Cirrus Logic CS4206/4207 imac27 IMac 27 Inch auto BIOS setup (default) +Cirrus Logic CS4208 +=================== + mba6 MacBook Air 6,1 and 6,2 + gpio0 Enable GPIO 0 amp + auto BIOS setup (default) + VIA VT17xx/VT18xx/VT20xx ======================== auto BIOS setup (default) diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt index c3c912d023c..42a0a39b77e 100644 --- a/Documentation/sound/alsa/HD-Audio.txt +++ b/Documentation/sound/alsa/HD-Audio.txt @@ -454,6 +454,8 @@ The generic parser supports the following hints: - need_dac_fix (bool): limits the DACs depending on the channel count - primary_hp (bool): probe headphone jacks as the primary outputs; default true +- multi_io (bool): try probing multi-I/O config (e.g. 
shared + line-in/surround, mic/clfe jacks) - multi_cap_vol (bool): provide multiple capture volumes - inv_dmic_split (bool): provide split internal mic volume/switch for phase-inverted digital mics diff --git a/Documentation/sound/alsa/README.maya44 b/Documentation/sound/alsa/README.maya44 index 0e41576fa13..67b2ea1cc31 100644 --- a/Documentation/sound/alsa/README.maya44 +++ b/Documentation/sound/alsa/README.maya44 @@ -120,7 +120,7 @@ Mic Phantom+48V: switch for +48V phantom power for electrostatic microphones on Make sure this is not turned on while any other source is connected to input 1/2. It might damage the source and/or the maya44 card. -Mic/Line input: if switch is is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo). +Mic/Line input: if switch is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo). Bypass: analogue bypass from ADC input to output for channel 1+2. Same as "Monitor" in the windows driver. Bypass 1: same for channel 3+4. diff --git a/Documentation/sound/alsa/compress_offload.txt b/Documentation/sound/alsa/compress_offload.txt index 0bcc5515591..630c492c3dc 100644 --- a/Documentation/sound/alsa/compress_offload.txt +++ b/Documentation/sound/alsa/compress_offload.txt @@ -73,7 +73,7 @@ The main requirements are: Design -The new API shares a number of concepts with with the PCM API for flow +The new API shares a number of concepts with the PCM API for flow control. Start, pause, resume, drain and stop commands have the same semantics no matter what the content is. @@ -130,7 +130,7 @@ the settings should remain the exception. The timestamp becomes a multiple field structure. It lists the number of bytes transferred, the number of samples processed and the number of samples rendered/grabbed. All these values can be used to determine -the avarage bitrate, figure out if the ring buffer needs to be +the average bitrate, figure out if the ring buffer needs to be refilled or the delay due to decoding/encoding/io on the DSP. Note that the list of codecs/profiles/modes was derived from the @@ -217,12 +217,12 @@ Not supported: would be enabled with ALSA kcontrols. - Audio policy/resource management. This API does not provide any - hooks to query the utilization of the audio DSP, nor any premption + hooks to query the utilization of the audio DSP, nor any preemption mechanisms. -- No notion of underun/overrun. Since the bytes written are compressed +- No notion of underrun/overrun. Since the bytes written are compressed in nature and data written/read doesn't translate directly to - rendered output in time, this does not deal with underrun/overun and + rendered output in time, this does not deal with underrun/overrun and maybe dealt in user-library Credits: diff --git a/Documentation/sound/alsa/soc/DPCM.txt b/Documentation/sound/alsa/soc/DPCM.txt new file mode 100644 index 00000000000..0110180b7ac --- /dev/null +++ b/Documentation/sound/alsa/soc/DPCM.txt @@ -0,0 +1,380 @@ +Dynamic PCM +=========== + +1. Description +============== + +Dynamic PCM allows an ALSA PCM device to digitally route its PCM audio to +various digital endpoints during the PCM stream runtime. e.g. PCM0 can route +digital audio to I2S DAI0, I2S DAI1 or PDM DAI2. This is useful for on SoC DSP +drivers that expose several ALSA PCMs and can route to multiple DAIs. + +The DPCM runtime routing is determined by the ALSA mixer settings in the same +way as the analog signal is routed in an ASoC codec driver. 
DPCM uses a DAPM +graph representing the DSP internal audio paths and uses the mixer settings to +determine the patch used by each ALSA PCM. + +DPCM re-uses all the existing component codec, platform and DAI drivers without +any modifications. + + +Phone Audio System with SoC based DSP +------------------------------------- + +Consider the following phone audio subsystem. This will be used in this +document for all examples :- + +| Front End PCMs | SoC DSP | Back End DAIs | Audio devices | + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +This diagram shows a simple smart phone audio subsystem. It supports Bluetooth, +FM digital radio, Speakers, Headset Jack, digital microphones and cellular +modem. This sound card exposes 4 DSP front end (FE) ALSA PCM devices and +supports 6 back end (BE) DAIs. Each FE PCM can digitally route audio data to any +of the BE DAIs. The FE PCM devices can also route audio to more than 1 BE DAI. + + + +Example - DPCM Switching playback from DAI0 to DAI1 +--------------------------------------------------- + +Audio is being played to the Headset. After a while the user removes the headset +and audio continues playing on the speakers. + +Playback on PCM0 to Headset would look like :- + + ************* +PCM0 <============> * * <====DAI0=====> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +The headset is removed from the jack by user so the speakers must now be used :- + + ************* +PCM0 <============> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <====DAI1=====> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +The audio driver processes this as follows :- + + 1) Machine driver receives Jack removal event. + + 2) Machine driver OR audio HAL disables the Headset path. + + 3) DPCM runs the PCM trigger(stop), hw_free(), shutdown() operations on DAI0 + for headset since the path is now disabled. + + 4) Machine driver or audio HAL enables the speaker path. + + 5) DPCM runs the PCM ops for startup(), hw_params(), prepapre() and + trigger(start) for DAI1 Speakers since the path is enabled. + +In this example, the machine driver or userspace audio HAL can alter the routing +and then DPCM will take care of managing the DAI PCM operations to either bring +the link up or down. Audio playback does not stop during this transition. + + + +DPCM machine driver +=================== + +The DPCM enabled ASoC machine driver is similar to normal machine drivers +except that we also have to :- + + 1) Define the FE and BE DAI links. + + 2) Define any FE/BE PCM operations. + + 3) Define widget graph connections. 
+ + +1 FE and BE DAI links +--------------------- + +| Front End PCMs | SoC DSP | Back End DAIs | Audio devices | + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +For the example above we have to define 4 FE DAI links and 6 BE DAI links. The +FE DAI links are defined as follows :- + +static struct snd_soc_dai_link machine_dais[] = { + { + .name = "PCM0 System", + .stream_name = "System Playback", + .cpu_dai_name = "System Pin", + .platform_name = "dsp-audio", + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .dynamic = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, + .dpcm_playback = 1, + }, + .....< other FE and BE DAI links here > +}; + +This FE DAI link is pretty similar to a regular DAI link except that we also +set the DAI link to a DPCM FE with the "dynamic = 1". The supported FE stream +directions should also be set with the "dpcm_playback" and "dpcm_capture" +flags. There is also an option to specify the ordering of the trigger call for +each FE. This allows the ASoC core to trigger the DSP before or after the other +components (as some DSPs have strong requirements for the ordering DAI/DSP +start and stop sequences). + +The FE DAI above sets the codec and code DAIs to dummy devices since the BE is +dynamic and will change depending on runtime config. + +The BE DAIs are configured as follows :- + +static struct snd_soc_dai_link machine_dais[] = { + .....< FE DAI links here > + { + .name = "Codec Headset", + .cpu_dai_name = "ssp-dai.0", + .platform_name = "snd-soc-dummy", + .no_pcm = 1, + .codec_name = "rt5640.0-001c", + .codec_dai_name = "rt5640-aif1", + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .be_hw_params_fixup = hswult_ssp0_fixup, + .ops = &haswell_ops, + .dpcm_playback = 1, + .dpcm_capture = 1, + }, + .....< other BE DAI links here > +}; + +This BE DAI link connects DAI0 to the codec (in this case RT5460 AIF1). It sets +the "no_pcm" flag to mark it has a BE and sets flags for supported stream +directions using "dpcm_playback" and "dpcm_capture" above. + +The BE has also flags set for ignoring suspend and PM down time. This allows +the BE to work in a hostless mode where the host CPU is not transferring data +like a BT phone call :- + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <====DAI2=====> MODEM + * * +PCM3 <------------> * * <====DAI3=====> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +This allows the host CPU to sleep whilst the DSP, MODEM DAI and the BT DAI are +still in operation. + +A BE DAI link can also set the codec to a dummy device if the code is a device +that is managed externally. + +Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the +DSP firmware. + + +2 FE/BE PCM operations +---------------------- + +The BE above also exports some PCM operations and a "fixup" callback. The fixup +callback is used by the machine driver to (re)configure the DAI based upon the +FE hw params. i.e. the DSP may perform SRC or ASRC from the FE to BE. + +e.g. DSP converts all FE hw params to run at fixed rate of 48k, 16bit, stereo for +DAI0. 
This means all FE hw_params have to be fixed in the machine driver for +DAI0 so that the DAI is running at desired configuration regardless of the FE +configuration. + +static int dai0_fixup(struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate = hw_param_interval(params, + SNDRV_PCM_HW_PARAM_RATE); + struct snd_interval *channels = hw_param_interval(params, + SNDRV_PCM_HW_PARAM_CHANNELS); + + /* The DSP will covert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set DAI0 to 16 bit */ + snd_mask_set(¶ms->masks[SNDRV_PCM_HW_PARAM_FORMAT - + SNDRV_PCM_HW_PARAM_FIRST_MASK], + SNDRV_PCM_FORMAT_S16_LE); + return 0; +} + +The other PCM operation are the same as for regular DAI links. Use as necessary. + + +3 Widget graph connections +-------------------------- + +The BE DAI links will normally be connected to the graph at initialisation time +by the ASoC DAPM core. However, if the BE codec or BE DAI is a dummy then this +has to be set explicitly in the driver :- + +/* BE for codec Headset - DAI0 is dummy and managed by DSP FW */ +{"DAI0 CODEC IN", NULL, "AIF1 Capture"}, +{"AIF1 Playback", NULL, "DAI0 CODEC OUT"}, + + +Writing a DPCM DSP driver +========================= + +The DPCM DSP driver looks much like a standard platform class ASoC driver +combined with elements from a codec class driver. A DSP platform driver must +implement :- + + 1) Front End PCM DAIs - i.e. struct snd_soc_dai_driver. + + 2) DAPM graph showing DSP audio routing from FE DAIs to BEs. + + 3) DAPM widgets from DSP graph. + + 4) Mixers for gains, routing, etc. + + 5) DMA configuration. + + 6) BE AIF widgets. + +Items 6 is important for routing the audio outside of the DSP. AIF need to be +defined for each BE and each stream direction. e.g for BE DAI0 above we would +have :- + +SND_SOC_DAPM_AIF_IN("DAI0 RX", NULL, 0, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_AIF_OUT("DAI0 TX", NULL, 0, SND_SOC_NOPM, 0, 0), + +The BE AIF are used to connect the DSP graph to the graphs for the other +component drivers (e.g. codec graph). + + +Hostless PCM streams +==================== + +A hostless PCM stream is a stream that is not routed through the host CPU. An +example of this would be a phone call from handset to modem. + + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <====DAI1=====> Codec Speakers/Mic + * DSP * +PCM2 <------------> * * <====DAI2=====> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +In this case the PCM data is routed via the DSP. The host CPU in this use case +is only used for control and can sleep during the runtime of the stream. + +The host can control the hostless link either by :- + + 1) Configuring the link as a CODEC <-> CODEC style link. In this case the link + is enabled or disabled by the state of the DAPM graph. This usually means + there is a mixer control that can be used to connect or disconnect the path + between both DAIs. + + 2) Hostless FE. This FE has a virtual connection to the BE DAI links on the DAPM + graph. Control is then carried out by the FE as regular PCM operations. + This method gives more control over the DAI links, but requires much more + userspace code to control the link. Its recommended to use CODEC<->CODEC + unless your HW needs more fine grained sequencing of the PCM ops. 
+ + +CODEC <-> CODEC link +-------------------- + +This DAI link is enabled when DAPM detects a valid path within the DAPM graph. +The machine driver sets some additional parameters to the DAI link i.e. + +static const struct snd_soc_pcm_stream dai_params = { + .formats = SNDRV_PCM_FMTBIT_S32_LE, + .rate_min = 8000, + .rate_max = 8000, + .channels_min = 2, + .channels_max = 2, +}; + +static struct snd_soc_dai_link dais[] = { + < ... more DAI links above ... > + { + .name = "MODEM", + .stream_name = "MODEM", + .cpu_dai_name = "dai2", + .codec_dai_name = "modem-aif1", + .codec_name = "modem", + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF + | SND_SOC_DAIFMT_CBM_CFM, + .params = &dai_params, + } + < ... more DAI links here ... > + +These parameters are used to configure the DAI hw_params() when DAPM detects a +valid path and then calls the PCM operations to start the link. DAPM will also +call the appropriate PCM operations to disable the DAI when the path is no +longer valid. + + +Hostless FE +----------- + +The DAI link(s) are enabled by a FE that does not read or write any PCM data. +This means creating a new FE that is connected with a virtual path to both +DAI links. The DAI links will be started when the FE PCM is started and stopped +when the FE PCM is stopped. Note that the FE PCM cannot read or write data in +this configuration. + + diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/alsa/soc/codec.txt index bce23a4a787..db5f9c9ae14 100644 --- a/Documentation/sound/alsa/soc/codec.txt +++ b/Documentation/sound/alsa/soc/codec.txt @@ -1,22 +1,23 @@ -ASoC Codec Driver -================= +ASoC Codec Class Driver +======================= -The codec driver is generic and hardware independent code that configures the -codec to provide audio capture and playback. It should contain no code that is -specific to the target platform or machine. All platform and machine specific -code should be added to the platform and machine drivers respectively. +The codec class driver is generic and hardware independent code that configures +the codec, FM, MODEM, BT or external DSP to provide audio capture and playback. +It should contain no code that is specific to the target platform or machine. +All platform and machine specific code should be added to the platform and +machine drivers respectively. -Each codec driver *must* provide the following features:- +Each codec class driver *must* provide the following features:- 1) Codec DAI and PCM configuration - 2) Codec control IO - using I2C, 3 Wire(SPI) or both APIs + 2) Codec control IO - using RegMap API 3) Mixers and audio controls 4) Codec audio operations + 5) DAPM description. + 6) DAPM event handler. Optionally, codec drivers can also provide:- - 5) DAPM description. - 6) DAPM event handler. 7) DAC Digital mute control. Its probably best to use this guide in conjunction with the existing codec @@ -64,26 +65,9 @@ struct snd_soc_dai_driver wm8731_dai = { 2 - Codec control IO -------------------- The codec can usually be controlled via an I2C or SPI style interface -(AC97 combines control with data in the DAI). The codec drivers provide -functions to read and write the codec registers along with supplying a -register cache:- - - /* IO control data and register cache */ - void *control_data; /* codec control (i2c/3wire) data */ - void *reg_cache; - -Codec read/write should do any data formatting and call the hardware -read write below to perform the IO. 
These functions are called by the -core and ALSA when performing DAPM or changing the mixer:- - - unsigned int (*read)(struct snd_soc_codec *, unsigned int); - int (*write)(struct snd_soc_codec *, unsigned int, unsigned int); - -Codec hardware IO functions - usually points to either the I2C, SPI or AC97 -read/write:- - - hw_write_t hw_write; - hw_read_t hw_read; +(AC97 combines control with data in the DAI). The codec driver should use the +Regmap API for all codec IO. Please see include/linux/regmap.h and existing +codec drivers for example regmap usage. 3 - Mixers and audio controls @@ -127,7 +111,7 @@ Defines a stereo enumerated control 4 - Codec Audio Operations -------------------------- -The codec driver also supports the following ALSA operations:- +The codec driver also supports the following ALSA PCM operations:- /* SoC audio ops */ struct snd_soc_ops { diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt index 05bf5a0eee4..6faab488000 100644 --- a/Documentation/sound/alsa/soc/dapm.txt +++ b/Documentation/sound/alsa/soc/dapm.txt @@ -21,7 +21,7 @@ level power systems. There are 4 power domains within DAPM - 1. Codec domain - VREF, VMID (core codec and audio power) + 1. Codec bias domain - VREF, VMID (core codec and audio power) Usually controlled at codec probe/remove and suspend/resume, although can be set at stream time if power is not needed for sidetone, etc. @@ -30,7 +30,7 @@ There are 4 power domains within DAPM machine driver and responds to asynchronous events e.g when HP are inserted - 3. Path domain - audio susbsystem signal paths + 3. Path domain - audio subsystem signal paths Automatically set when mixer and mux settings are changed by the user. e.g. alsamixer, amixer. @@ -63,14 +63,22 @@ Audio DAPM widgets fall into a number of types:- o Line - Line Input/Output (and optional Jack) o Speaker - Speaker o Supply - Power or clock supply widget used by other widgets. + o Regulator - External regulator that supplies power to audio components. + o Clock - External clock that supplies clock to audio components. + o AIF IN - Audio Interface Input (with TDM slot mask). + o AIF OUT - Audio Interface Output (with TDM slot mask). + o Siggen - Signal Generator. + o DAI IN - Digital Audio Interface Input. + o DAI OUT - Digital Audio Interface Output. + o DAI Link - DAI Link between two DAI structures */ o Pre - Special PRE widget (exec before all others) o Post - Special POST widget (exec after all others) (Widgets are defined in include/sound/soc-dapm.h) -Widgets are usually added in the codec driver and the machine driver. There are -convenience macros defined in soc-dapm.h that can be used to quickly build a -list of widgets of the codecs and machines DAPM widgets. +Widgets can be added to the sound card by any of the component driver types. +There are convenience macros defined in soc-dapm.h that can be used to quickly +build a list of widgets of the codecs and machines DAPM widgets. Most widgets have a name, register, shift and invert. Some widgets have extra parameters for stream name and kcontrols. @@ -80,11 +88,13 @@ parameters for stream name and kcontrols. ------------------------- Stream Widgets relate to the stream power domain and only consist of ADCs -(analog to digital converters) and DACs (digital to analog converters). +(analog to digital converters), DACs (digital to analog converters), +AIF IN and AIF OUT. 
Stream widgets have the following format:- SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert), +SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert) NOTE: the stream name must match the corresponding stream name in your codec snd_soc_codec_dai. @@ -94,6 +104,11 @@ e.g. stream widgets for HiFi playback and capture SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", REG, 3, 1), SND_SOC_DAPM_ADC("HiFi ADC", "HiFi Capture", REG, 2, 1), +e.g. stream widgets for AIF + +SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0), + 2.2 Path Domain Widgets ----------------------- @@ -121,12 +136,14 @@ If you dont want the mixer elements prefixed with the name of the mixer widget, you can use SND_SOC_DAPM_MIXER_NAMED_CTL instead. the parameters are the same as for SND_SOC_DAPM_MIXER. -2.3 Platform/Machine domain Widgets ------------------------------------ + +2.3 Machine domain Widgets +-------------------------- Machine widgets are different from codec widgets in that they don't have a codec register bit associated with them. A machine widget is assigned to each -machine audio component (non codec) that can be independently powered. e.g. +machine audio component (non codec or DSP) that can be independently +powered. e.g. o Speaker Amp o Microphone Bias @@ -146,12 +163,12 @@ static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event) SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias), -2.4 Codec Domain ----------------- +2.4 Codec (BIAS) Domain +----------------------- -The codec power domain has no widgets and is handled by the codecs DAPM event -handler. This handler is called when the codec powerstate is changed wrt to any -stream event or by kernel PM events. +The codec bias power domain has no widgets and is handled by the codecs DAPM +event handler. This handler is called when the codec powerstate is changed wrt +to any stream event or by kernel PM events. 2.5 Virtual Widgets @@ -169,15 +186,16 @@ After all the widgets have been defined, they can then be added to the DAPM subsystem individually with a call to snd_soc_dapm_new_control(). -3. Codec Widget Interconnections -================================ +3. Codec/DSP Widget Interconnections +==================================== -Widgets are connected to each other within the codec and machine by audio paths -(called interconnections). Each interconnection must be defined in order to -create a map of all audio paths between widgets. +Widgets are connected to each other within the codec, platform and machine by +audio paths (called interconnections). Each interconnection must be defined in +order to create a map of all audio paths between widgets. -This is easiest with a diagram of the codec (and schematic of the machine audio -system), as it requires joining widgets together via their audio signal paths. +This is easiest with a diagram of the codec or DSP (and schematic of the machine +audio system), as it requires joining widgets together via their audio signal +paths. e.g., from the WM8731 output mixer (wm8731.c) @@ -247,16 +265,9 @@ machine and includes the codec. e.g. o Mic Jack o Codec Pins -When a codec pin is NC it can be marked as not used with a call to - -snd_soc_dapm_set_endpoint(codec, "Widget Name", 0); - -The last argument is 0 for inactive and 1 for active. This way the pin and its -input widget will never be powered up and consume power. - -This also applies to machine widgets. e.g. 
if a headphone is connected to a -jack then the jack can be marked active. If the headphone is removed, then -the headphone jack can be marked inactive. +Endpoints are added to the DAPM graph so that their usage can be determined in +order to save power. e.g. NC codecs pins will be switched OFF, unconnected +jacks can also be switched OFF. 5 DAPM Widget Events diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt index d50c14df341..74056dba52b 100644 --- a/Documentation/sound/alsa/soc/machine.txt +++ b/Documentation/sound/alsa/soc/machine.txt @@ -1,8 +1,10 @@ ASoC Machine Driver =================== -The ASoC machine (or board) driver is the code that glues together the platform -and codec drivers. +The ASoC machine (or board) driver is the code that glues together all the +component drivers (e.g. codecs, platforms and DAIs). It also describes the +relationships between each componnent which include audio paths, GPIOs, +interrupts, clocking, jacks and voltage regulators. The machine driver can contain codec and platform specific code. It registers the audio subsystem with the kernel as a platform device and is represented by diff --git a/Documentation/sound/alsa/soc/overview.txt b/Documentation/sound/alsa/soc/overview.txt index 138ac88c146..ff88f52eec9 100644 --- a/Documentation/sound/alsa/soc/overview.txt +++ b/Documentation/sound/alsa/soc/overview.txt @@ -49,18 +49,23 @@ features :- * Machine specific controls: Allow machines to add controls to the sound card (e.g. volume control for speaker amplifier). -To achieve all this, ASoC basically splits an embedded audio system into 3 -components :- +To achieve all this, ASoC basically splits an embedded audio system into +multiple re-usable component drivers :- - * Codec driver: The codec driver is platform independent and contains audio - controls, audio interface capabilities, codec DAPM definition and codec IO - functions. + * Codec class drivers: The codec class driver is platform independent and + contains audio controls, audio interface capabilities, codec DAPM + definition and codec IO functions. This class extends to BT, FM and MODEM + ICs if required. Codec class drivers should be generic code that can run + on any architecture and machine. - * Platform driver: The platform driver contains the audio DMA engine and audio - interface drivers (e.g. I2S, AC97, PCM) for that platform. + * Platform class drivers: The platform class driver includes the audio DMA + engine driver, digital audio interface (DAI) drivers (e.g. I2S, AC97, PCM) + and any audio DSP drivers for that platform. - * Machine driver: The machine driver handles any machine specific controls and - audio events (e.g. turning on an amp at start of playback). + * Machine class driver: The machine driver class acts as the glue that + decribes and binds the other component drivers together to form an ALSA + "sound card device". It handles any machine specific controls and + machine level audio events (e.g. turning on an amp at start of playback). Documentation @@ -84,3 +89,7 @@ machine.txt: Machine driver internals. pop_clicks.txt: How to minimise audio artifacts. clocking.txt: ASoC clocking for best power performance. + +jack.txt: ASoC jack detection. + +DPCM.txt: Dynamic PCM - Describes DPCM with DSP examples. 
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/alsa/soc/platform.txt index d57efad37e0..3a08a2c9150 100644 --- a/Documentation/sound/alsa/soc/platform.txt +++ b/Documentation/sound/alsa/soc/platform.txt @@ -1,9 +1,9 @@ ASoC Platform Driver ==================== -An ASoC platform driver can be divided into audio DMA and SoC DAI configuration -and control. The platform drivers only target the SoC CPU and must have no board -specific code. +An ASoC platform driver class can be divided into audio DMA drivers, SoC DAI +drivers and DSP drivers. The platform drivers only target the SoC CPU and must +have no board specific code. Audio DMA ========= @@ -64,3 +64,16 @@ Each SoC DAI driver must provide the following features:- 5) Suspend and resume (optional) Please see codec.txt for a description of items 1 - 4. + + +SoC DSP Drivers +=============== + +Each SoC DSP driver usually supplies the following features :- + + 1) DAPM graph + 2) Mixer controls + 3) DMA IO to/from DSP buffers (if applicable) + 4) Definition of DSP front end (FE) PCM devices. + +Please see DPCM.txt for a description of item 4. diff --git a/Documentation/sound/oss/vwsnd b/Documentation/sound/oss/vwsnd deleted file mode 100644 index 4c6cbdb3c54..00000000000 --- a/Documentation/sound/oss/vwsnd +++ /dev/null @@ -1,293 +0,0 @@ -vwsnd - Sound driver for the Silicon Graphics 320 and 540 Visual -Workstations' onboard audio. - -Copyright 1999 Silicon Graphics, Inc. All rights reserved. - - -At the time of this writing, March 1999, there are two models of -Visual Workstation, the 320 and the 540. This document only describes -those models. Future Visual Workstation models may have different -sound capabilities, and this driver will probably not work on those -boxes. - -The Visual Workstation has an Analog Devices AD1843 "SoundComm" audio -codec chip. The AD1843 is accessed through the Cobalt I/O ASIC, also -known as Lithium. This driver programs both chips. - -============================================================================== -QUICK CONFIGURATION - - # insmod soundcore - # insmod vwsnd - -============================================================================== -I/O CONNECTIONS - -On the Visual Workstation, only three of the AD1843 inputs are hooked -up. The analog line in jacks are connected to the AD1843's AUX1 -input. The CD audio lines are connected to the AD1843's AUX2 input. -The microphone jack is connected to the AD1843's MIC input. The mic -jack is mono, but the signal is delivered to both the left and right -MIC inputs. You can record in stereo from the mic input, but you will -get the same signal on both channels (within the limits of A/D -accuracy). Full scale on the Line input is +/- 2.0 V. Full scale on -the MIC input is 20 dB less, or +/- 0.2 V. - -The AD1843's LOUT1 outputs are connected to the Line Out jacks. The -AD1843's HPOUT outputs are connected to the speaker/headphone jack. -LOUT2 is not connected. Line out's maximum level is +/- 2.0 V peak to -peak. The speaker/headphone out's maximum is +/- 4.0 V peak to peak. - -The AD1843's PCM input channel and one of its output channels (DAC1) -are connected to Lithium. The other output channel (DAC2) is not -connected. - -============================================================================== -CAPABILITIES - -The AD1843 has PCM input and output (Pulse Code Modulation, also known -as wavetable). PCM input and output can be mono or stereo in any of -four formats. 
The formats are 16 bit signed and 8 bit unsigned, -u-Law, and A-Law format. Any sample rate from 4 KHz to 49 KHz is -available, in 1 Hz increments. - -The AD1843 includes an analog mixer that can mix all three input -signals (line, mic and CD) into the analog outputs. The mixer has a -separate gain control and mute switch for each input. - -There are two outputs, line out and speaker/headphone out. They -always produce the same signal, and the speaker always has 3 dB more -gain than the line out. The speaker/headphone output can be muted, -but this driver does not export that function. - -The hardware can sync audio to the video clock, but this driver does -not have a way to specify syncing to video. - -============================================================================== -PROGRAMMING - -This section explains the API supported by the driver. Also see the -Open Sound Programming Guide at http://www.opensound.com/pguide/ . -This section assumes familiarity with that document. - -The driver has two interfaces, an I/O interface and a mixer interface. -There is no MIDI or sequencer capability. - -============================================================================== -PROGRAMMING PCM I/O - -The I/O interface is usually accessed as /dev/audio or /dev/dsp. -Using the standard Open Sound System (OSS) ioctl calls, the sample -rate, number of channels, and sample format may be set within the -limitations described above. The driver supports triggering. It also -supports getting the input and output pointers with one-sample -accuracy. - -The SNDCTL_DSP_GETCAP ioctl returns these capabilities. - - DSP_CAP_DUPLEX - driver supports full duplex. - - DSP_CAP_TRIGGER - driver supports triggering. - - DSP_CAP_REALTIME - values returned by SNDCTL_DSP_GETIPTR - and SNDCTL_DSP_GETOPTR are accurate to a few samples. - -Memory mapping (mmap) is not implemented. - -The driver permits subdivided fragment sizes from 64 to 4096 bytes. -The number of fragments can be anything from 3 fragments to however -many fragments fit into 124 kilobytes. It is up to the user to -determine how few/small fragments can be used without introducing -glitches with a given workload. Linux is not realtime, so we can't -promise anything. (sigh...) - -When this driver is switched into or out of mu-Law or A-Law mode on -output, it may produce an audible click. This is unavoidable. To -prevent clicking, use signed 16-bit mode instead, and convert from -mu-Law or A-Law format in software. - -============================================================================== -PROGRAMMING THE MIXER INTERFACE - -The mixer interface is usually accessed as /dev/mixer. It is accessed -through ioctls. The mixer allows the application to control gain or -mute several audio signal paths, and also allows selection of the -recording source. - -Each of the constants described here can be read using the -MIXER_READ(SOUND_MIXER_xxx) ioctl. Those that are not read-only can -also be written using the MIXER_WRITE(SOUND_MIXER_xxx) ioctl. In most -cases, <sys/soundcard.h> defines constants SOUND_MIXER_READ_xxx and -SOUND_MIXER_WRITE_xxx which work just as well. - -SOUND_MIXER_CAPS Read-only - -This is a mask of optional driver capabilities that are implemented. -This driver's only capability is SOUND_CAP_EXCL_INPUT, which means -that only one recording source can be active at a time. - -SOUND_MIXER_DEVMASK Read-only - -This is a mask of the sound channels. This driver's channels are PCM, -LINE, MIC, CD, and RECLEV. 
- -SOUND_MIXER_STEREODEVS Read-only - -This is a mask of which sound channels are capable of stereo. All -channels are capable of stereo. (But see caveat on MIC input in I/O -CONNECTIONS section above). - -SOUND_MIXER_OUTMASK Read-only - -This is a mask of channels that route inputs through to outputs. -Those are LINE, MIC, and CD. - -SOUND_MIXER_RECMASK Read-only - -This is a mask of channels that can be recording sources. Those are -PCM, LINE, MIC, CD. - -SOUND_MIXER_PCM Default: 0x5757 (0 dB) - -This is the gain control for PCM output. The left and right channel -gain are controlled independently. This gain control has 64 levels, -which range from -82.5 dB to +12.0 dB in 1.5 dB steps. Those 64 -levels are mapped onto 100 levels at the ioctl, see below. - -SOUND_MIXER_LINE Default: 0x4a4a (0 dB) - -This is the gain control for mixing the Line In source into the -outputs. The left and right channel gain are controlled -independently. This gain control has 32 levels, which range from --34.5 dB to +12.0 dB in 1.5 dB steps. Those 32 levels are mapped onto -100 levels at the ioctl, see below. - -SOUND_MIXER_MIC Default: 0x4a4a (0 dB) - -This is the gain control for mixing the MIC source into the outputs. -The left and right channel gain are controlled independently. This -gain control has 32 levels, which range from -34.5 dB to +12.0 dB in -1.5 dB steps. Those 32 levels are mapped onto 100 levels at the -ioctl, see below. - -SOUND_MIXER_CD Default: 0x4a4a (0 dB) - -This is the gain control for mixing the CD audio source into the -outputs. The left and right channel gain are controlled -independently. This gain control has 32 levels, which range from --34.5 dB to +12.0 dB in 1.5 dB steps. Those 32 levels are mapped onto -100 levels at the ioctl, see below. - -SOUND_MIXER_RECLEV Default: 0 (0 dB) - -This is the gain control for PCM input (RECording LEVel). The left -and right channel gain are controlled independently. This gain -control has 16 levels, which range from 0 dB to +22.5 dB in 1.5 dB -steps. Those 16 levels are mapped onto 100 levels at the ioctl, see -below. - -SOUND_MIXER_RECSRC Default: SOUND_MASK_LINE - -This is a mask of currently selected PCM input sources (RECording -SouRCes). Because the AD1843 can only have a single recording source -at a time, only one bit at a time can be set in this mask. The -allowable values are SOUND_MASK_PCM, SOUND_MASK_LINE, SOUND_MASK_MIC, -or SOUND_MASK_CD. Selecting SOUND_MASK_PCM sets up internal -resampling which is useful for loopback testing and for hardware -sample rate conversion. But software sample rate conversion is -probably faster, so I don't know how useful that is. - -SOUND_MIXER_OUTSRC DEFAULT: SOUND_MASK_LINE|SOUND_MASK_MIC|SOUND_MASK_CD - -This is a mask of sources that are currently passed through to the -outputs. Those sources whose bits are not set are muted. - -============================================================================== -GAIN CONTROL - -There are five gain controls listed above. Each has 16, 32, or 64 -steps. Each control has 1.5 dB of gain per step. Each control is -stereo. - -The OSS defines the argument to a channel gain ioctl as having two -components, left and right, each of which ranges from 0 to 100. The -two components are packed into the same word, with the left side gain -in the least significant byte, and the right side gain in the second -least significant byte. In C, we would say this. - - #include <assert.h> - - ... 
- - assert(leftgain >= 0 && leftgain <= 100); - assert(rightgain >= 0 && rightgain <= 100); - arg = leftgain | rightgain << 8; - -So each OSS gain control has 101 steps. But the hardware has 16, 32, -or 64 steps. The hardware steps are spread across the 101 OSS steps -nearly evenly. The conversion formulas are like this, given N equals -16, 32, or 64. - - int round = N/2 - 1; - OSS_gain_steps = (hw_gain_steps * 100 + round) / (N - 1); - hw_gain_steps = (OSS_gain_steps * (N - 1) + round) / 100; - -Here is a snippet of C code that will return the left and right gain -of any channel in dB. Pass it one of the predefined gain_desc_t -structures to access any of the five channels' gains. - - typedef struct gain_desc { - float min_gain; - float gain_step; - int nbits; - int chan; - } gain_desc_t; - - const gain_desc_t gain_pcm = { -82.5, 1.5, 6, SOUND_MIXER_PCM }; - const gain_desc_t gain_line = { -34.5, 1.5, 5, SOUND_MIXER_LINE }; - const gain_desc_t gain_mic = { -34.5, 1.5, 5, SOUND_MIXER_MIC }; - const gain_desc_t gain_cd = { -34.5, 1.5, 5, SOUND_MIXER_CD }; - const gain_desc_t gain_reclev = { 0.0, 1.5, 4, SOUND_MIXER_RECLEV }; - - int get_gain_dB(int fd, const gain_desc_t *gp, - float *left, float *right) - { - int word; - int lg, rg; - int mask = (1 << gp->nbits) - 1; - - if (ioctl(fd, MIXER_READ(gp->chan), &word) != 0) - return -1; /* fail */ - lg = word & 0xFF; - rg = word >> 8 & 0xFF; - lg = (lg * mask + mask / 2) / 100; - rg = (rg * mask + mask / 2) / 100; - *left = gp->min_gain + gp->gain_step * lg; - *right = gp->min_gain + gp->gain_step * rg; - return 0; - } - -And here is the corresponding routine to set a channel's gain in dB. - - int set_gain_dB(int fd, const gain_desc_t *gp, float left, float right) - { - float max_gain = - gp->min_gain + (1 << gp->nbits) * gp->gain_step; - float round = gp->gain_step / 2; - int mask = (1 << gp->nbits) - 1; - int word; - int lg, rg; - - if (left < gp->min_gain || right < gp->min_gain) - return EINVAL; - lg = (left - gp->min_gain + round) / gp->gain_step; - rg = (right - gp->min_gain + round) / gp->gain_step; - if (lg >= (1 << gp->nbits) || rg >= (1 << gp->nbits)) - return EINVAL; - lg = (100 * lg + mask / 2) / mask; - rg = (100 * rg + mask / 2) / mask; - word = lg | rg << 8; - - return ioctl(fd, MIXER_WRITE(gp->chan), &word); - } - |