Diffstat (limited to 'Documentation/sound')

 Documentation/sound/alsa/ALSA-Configuration.txt |  11
 Documentation/sound/alsa/Audiophile-Usb.txt     |   2
 Documentation/sound/alsa/CMIPCI.txt             |   2
 Documentation/sound/alsa/HD-Audio-Models.txt    |  60
 Documentation/sound/alsa/HD-Audio.txt           | 130
 Documentation/sound/alsa/README.maya44          |   2
 Documentation/sound/alsa/compress_offload.txt   |  56
 Documentation/sound/alsa/seq_oss.html           |   2
 Documentation/sound/alsa/soc/DPCM.txt           | 380
 Documentation/sound/alsa/soc/codec.txt          |  46
 Documentation/sound/alsa/soc/dapm.txt           |  73
 Documentation/sound/alsa/soc/machine.txt        |   6
 Documentation/sound/alsa/soc/overview.txt       |  27
 Documentation/sound/alsa/soc/platform.txt       |  19
 Documentation/sound/oss/vwsnd                   | 293

 15 files changed, 677 insertions(+), 432 deletions(-)
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index d90d8ec2853..7ccf933bfbe 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt @@ -616,7 +616,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. As default, snd-dummy drivers doesn't allocate the real buffers but either ignores read/write or mmap a single dummy page to all - buffer pages, in order to save the resouces. If your apps need + buffer pages, in order to save the resources. If your apps need the read/ written buffer data to be consistent, pass fake_buffer=0 option. @@ -911,7 +911,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. models depending on the codec chip. The list of available models is found in HD-Audio-Models.txt - The model name "genric" is treated as a special case. When this + The model name "generic" is treated as a special case. When this model is given, the driver uses the generic codec parser without "codec-patch". It's sometimes good for testing and debugging. @@ -948,7 +948,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. avoided as much as possible... MORE NOTES ON "azx_get_response timeout" PROBLEMS: - On some hardwares, you may need to add a proper probe_mask option + On some hardware, you may need to add a proper probe_mask option to avoid the "azx_get_response timeout" problem above, instead. This occurs when the access to non-existing or non-working codec slot (likely a modem one) causes a stall of the communication via HD-audio @@ -1124,7 +1124,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. buggy_irq - Enable workaround for buggy interrupts on some motherboards (default yes on nForce chips, otherwise off) - buggy_semaphore - Enable workaround for hardwares with buggy + buggy_semaphore - Enable workaround for hardware with buggy semaphores (e.g. on some ASUS laptops) (default off) spdif_aclink - Use S/PDIF over AC-link instead of direct connection @@ -1905,7 +1905,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. vid - Vendor ID for the device (optional) pid - Product ID for the device (optional) nrpacks - Max. number of packets per URB (default: 8) - async_unlink - Use async unlink mode (default: yes) device_setup - Device specific magic number (optional) - Influence depends on the device - Default: 0x0000 @@ -1917,8 +1916,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. NB: nrpacks parameter can be modified dynamically via sysfs. Don't put the value over 20. Changing via sysfs has no sanity check. - NB: async_unlink=0 would cause Oops. It remains just for - debugging purpose (if any). NB: ignore_ctl_error=1 may help when you get an error at accessing the mixer element such as URB error -22. This happens on some buggy USB device or the controller. 
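(As an aside, the module options touched above, such as `model` and `probe_mask` for snd-hda-intel, are normally set from a .conf file under /etc/modprobe.d/, in the same way the ALSA-Configuration.txt document describes. The file name and values below are purely illustrative and are not part of this patch:

    # /etc/modprobe.d/alsa.conf (illustrative values only)
    options snd-hda-intel model=generic probe_mask=1
)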
diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/alsa/Audiophile-Usb.txt index 654dd3b694a..e7a5ed4dcae 100644 --- a/Documentation/sound/alsa/Audiophile-Usb.txt +++ b/Documentation/sound/alsa/Audiophile-Usb.txt @@ -232,7 +232,7 @@ The parameter can be given: # modprobe snd-usb-audio index=1 device_setup=0x09 * Or while configuring the modules options in your modules configuration file - (tipically a .conf file in /etc/modprobe.d/ directory: + (typically a .conf file in /etc/modprobe.d/ directory: alias snd-card-1 snd-usb-audio options snd-usb-audio index=1 device_setup=0x09 diff --git a/Documentation/sound/alsa/CMIPCI.txt b/Documentation/sound/alsa/CMIPCI.txt index 16935c8561f..4e36e6e809c 100644 --- a/Documentation/sound/alsa/CMIPCI.txt +++ b/Documentation/sound/alsa/CMIPCI.txt @@ -87,7 +87,7 @@ with 4 channels, and use the interleaved 4 channel data. -There are some control switchs affecting to the speaker connections: +There are some control switches affecting to the speaker connections: "Line-In Mode" - an enum control to change the behavior of line-in jack. Either "Line-In", "Rear Output" or "Bass Output" can diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 16dfe57f173..d1ab5e17eb1 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -21,39 +21,43 @@ ALC267/268 ========== inv-dmic Inverted internal mic workaround -ALC269/270/275/276/280/282 +ALC269/270/275/276/28x/29x ====== - laptop-amic Laptops with analog-mic input - laptop-dmic Laptops with digital-mic input - alc269-dmic Enable ALC269(VA) digital mic workaround - alc271-dmic Enable ALC271X digital mic workaround - inv-dmic Inverted internal mic workaround - lenovo-dock Enables docking station I/O for some Lenovos - -ALC662/663/272 + laptop-amic Laptops with analog-mic input + laptop-dmic Laptops with digital-mic input + alc269-dmic Enable ALC269(VA) digital mic workaround + alc271-dmic Enable ALC271X digital mic workaround + inv-dmic Inverted internal mic workaround + headset-mic Indicates a combined headset (headphone+mic) jack + lenovo-dock Enables docking station I/O for some Lenovos + dell-headset-multi Headset jack, which can also be used as mic-in + dell-headset-dock Headset jack (without mic-in), and also dock I/O + +ALC66x/67x/892 ============== - mario Chromebook mario model fixup - asus-mode1 ASUS - asus-mode2 ASUS - asus-mode3 ASUS - asus-mode4 ASUS - asus-mode5 ASUS - asus-mode6 ASUS - asus-mode7 ASUS - asus-mode8 ASUS - inv-dmic Inverted internal mic workaround + mario Chromebook mario model fixup + asus-mode1 ASUS + asus-mode2 ASUS + asus-mode3 ASUS + asus-mode4 ASUS + asus-mode5 ASUS + asus-mode6 ASUS + asus-mode7 ASUS + asus-mode8 ASUS + inv-dmic Inverted internal mic workaround + dell-headset-multi Headset jack, which can also be used as mic-in ALC680 ====== N/A -ALC882/883/885/888/889 +ALC88x/898/1150 ====================== acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G acer-aspire-8930g Acer Aspire 8330G/6935G acer-aspire Acer Aspire others - inv-dmic Inverted internal mic workaround - no-primary-hp VAIO Z workaround (for fixed speaker DAC) + inv-dmic Inverted internal mic workaround + no-primary-hp VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC) ALC861/660 ========== @@ -241,6 +245,7 @@ STAC9227/9228/9229/927x 5stack-no-fp D965 5stack without front panel dell-3stack Dell Dimension E520 dell-bios Fixes with Dell BIOS setup + dell-bios-amic 
Fixes with Dell BIOS setup including analog mic volknob Fixes with volume-knob widget 0x24 auto BIOS setup (default) @@ -281,6 +286,11 @@ STAC92HD83* hp-inv-led HP with broken BIOS for inverted mute LED auto BIOS setup (default) +STAC92HD95 +========== + hp-led LED support for HP laptops + hp-bass Bass HPF setup for HP Spectre 13 + STAC9872 ======== vaio VAIO laptop without SPDIF @@ -292,6 +302,12 @@ Cirrus Logic CS4206/4207 imac27 IMac 27 Inch auto BIOS setup (default) +Cirrus Logic CS4208 +=================== + mba6 MacBook Air 6,1 and 6,2 + gpio0 Enable GPIO 0 amp + auto BIOS setup (default) + VIA VT17xx/VT18xx/VT20xx ======================== auto BIOS setup (default) diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt index 7813c06a5c7..42a0a39b77e 100644 --- a/Documentation/sound/alsa/HD-Audio.txt +++ b/Documentation/sound/alsa/HD-Audio.txt @@ -176,14 +176,14 @@ support the automatic probing (yet as of 2.6.28). And, BIOS is often, yes, pretty often broken. It sets up wrong values and screws up the driver. -The preset model is provided basically to overcome such a situation. -When the matching preset model is found in the white-list, the driver -assumes the static configuration of that preset and builds the mixer -elements and PCM streams based on the static information. Thus, if -you have a newer machine with a slightly different PCI SSID from the -existing one, you may have a good chance to re-use the same model. -You can pass the `model` option to specify the preset model instead of -PCI SSID look-up. +The preset model (or recently called as "fix-up") is provided +basically to overcome such a situation. When the matching preset +model is found in the white-list, the driver assumes the static +configuration of that preset with the correct pin setup, etc. +Thus, if you have a newer machine with a slightly different PCI SSID +(or codec SSID) from the existing one, you may have a good chance to +re-use the same model. You can pass the `model` option to specify the +preset model instead of PCI (and codec-) SSID look-up. What `model` option values are available depends on the codec chip. Check your codec chip from the codec proc file (see "Codec Proc-File" @@ -199,17 +199,12 @@ non-working HD-audio hardware is to check HD-audio codec and several different `model` option values. If you have any luck, some of them might suit with your device well. -Some codecs such as ALC880 have a special model option `model=test`. -This configures the driver to provide as many mixer controls as -possible for every single pin feature except for the unsolicited -events (and maybe some other specials). Adjust each mixer element and -try the I/O in the way of trial-and-error until figuring out the whole -I/O pin mappings. +There are a few special model option values: +- when 'nofixup' is passed, the device-specific fixups in the codec + parser are skipped. +- when `generic` is passed, the codec-specific parser is skipped and + only the generic parser is used. -Note that `model=generic` has a special meaning. It means to use the -generic parser regardless of the codec. Usually the codec-specific -parser is much better than the generic parser (as now). Thus this -option is more about the debugging purpose. Speaker and Headphone Output ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -387,9 +382,8 @@ init_verbs:: (separated with a space). hints:: Shows / stores hint strings for codec parsers for any use. - Its format is `key = value`. 
For example, passing `hp_detect = yes` - to IDT/STAC codec parser will result in the disablement of the - headphone detection. + Its format is `key = value`. For example, passing `jack_detect = no` + will disable the jack detection of the machine completely. init_pin_configs:: Shows the initial pin default config values set by BIOS. driver_pin_configs:: @@ -421,6 +415,65 @@ re-configure based on that state, run like below: ------------------------------------------------------------------------ +Hint Strings +~~~~~~~~~~~~ +The codec parser have several switches and adjustment knobs for +matching better with the actual codec or device behavior. Many of +them can be adjusted dynamically via "hints" strings as mentioned in +the section above. For example, by passing `jack_detect = no` string +via sysfs or a patch file, you can disable the jack detection, thus +the codec parser will skip the features like auto-mute or mic +auto-switch. As a boolean value, either `yes`, `no`, `true`, `false`, +`1` or `0` can be passed. + +The generic parser supports the following hints: + +- jack_detect (bool): specify whether the jack detection is available + at all on this machine; default true +- inv_jack_detect (bool): indicates that the jack detection logic is + inverted +- trigger_sense (bool): indicates that the jack detection needs the + explicit call of AC_VERB_SET_PIN_SENSE verb +- inv_eapd (bool): indicates that the EAPD is implemented in the + inverted logic +- pcm_format_first (bool): sets the PCM format before the stream tag + and channel ID +- sticky_stream (bool): keep the PCM format, stream tag and ID as long + as possible; default true +- spdif_status_reset (bool): reset the SPDIF status bits at each time + the SPDIF stream is set up +- pin_amp_workaround (bool): the output pin may have multiple amp + values +- single_adc_amp (bool): ADCs can have only single input amps +- auto_mute (bool): enable/disable the headphone auto-mute feature; + default true +- auto_mic (bool): enable/disable the mic auto-switch feature; default + true +- line_in_auto_switch (bool): enable/disable the line-in auto-switch + feature; default false +- need_dac_fix (bool): limits the DACs depending on the channel count +- primary_hp (bool): probe headphone jacks as the primary outputs; + default true +- multi_io (bool): try probing multi-I/O config (e.g. 
shared + line-in/surround, mic/clfe jacks) +- multi_cap_vol (bool): provide multiple capture volumes +- inv_dmic_split (bool): provide split internal mic volume/switch for + phase-inverted digital mics +- indep_hp (bool): provide the independent headphone PCM stream and + the corresponding mixer control, if available +- add_stereo_mix_input (bool): add the stereo mix (analog-loopback + mix) to the input mux if available +- add_jack_modes (bool): add "xxx Jack Mode" enum controls to each + I/O jack for allowing to change the headphone amp and mic bias VREF + capabilities +- power_down_unused (bool): power down the unused widgets +- add_hp_mic (bool): add the headphone to capture source if possible +- hp_mic_detect (bool): enable/disable the hp/mic shared input for a + single built-in mic case; default true +- mixer_nid (int): specifies the widget NID of the analog-loopback + mixer + + Early Patching ~~~~~~~~~~~~~~ When CONFIG_SND_HDA_PATCH_LOADER=y is set, you can pass a "patch" as a @@ -445,7 +498,7 @@ A patch file is a plain text file which looks like below: 0x20 0x400 0xff [hint] - hp_detect = yes + jack_detect = no ------------------------------------------------------------------------ The file needs to have a line `[codec]`. The next line should contain @@ -531,6 +584,13 @@ cable is unplugged. Thus, if you hear noises, suspect first the power-saving. See /sys/module/snd_hda_intel/parameters/power_save to check the current value. If it's non-zero, the feature is turned on. +The recent kernel supports the runtime PM for the HD-audio controller +chip, too. It means that the HD-audio controller is also powered up / +down dynamically. The feature is enabled only for certain controller +chips like Intel LynxPoint. You can enable/disable this feature +forcibly by setting `power_save_controller` option, which is also +available at /sys/module/snd_hda_intel/parameters directory. + Tracepoints ~~~~~~~~~~~ @@ -587,8 +647,9 @@ The latest development codes for HD-audio are found on sound git tree: - git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git The master branch or for-next branches can be used as the main -development branches in general while the HD-audio specific patches -are committed in topic/hda branch. +development branches in general while the development for the current +and next kernels are found in for-linus and for-next branches, +respectively. If you are using the latest Linus tree, it'd be better to pull the above GIT tree onto it. If you are using the older kernels, an easy @@ -699,7 +760,11 @@ won't be always updated. For example, the volume values are usually cached in the driver, and thus changing the widget amp value directly via hda-verb won't change the mixer value. -The hda-verb program is found in the ftp directory: +The hda-verb program is included now in alsa-tools: + +- git://git.alsa-project.org/alsa-tools.git + +Also, the old stand-alone package is found in the ftp directory: - ftp://ftp.suse.com/pub/people/tiwai/misc/ @@ -777,3 +842,18 @@ A git repository is available: See README file in the tarball for more details about hda-emu program. + + +hda-jack-retask +~~~~~~~~~~~~~~~ +hda-jack-retask is a user-friendly GUI program to manipulate the +HD-audio pin control for jack retasking. If you have a problem about +the jack assignment, try this program and check whether you can get +useful results. 
Once when you figure out the proper pin assignment, +it can be fixed either in the driver code statically or via passing a +firmware patch file (see "Early Patching" section). + +The program is included in alsa-tools now: + +- git://git.alsa-project.org/alsa-tools.git + diff --git a/Documentation/sound/alsa/README.maya44 b/Documentation/sound/alsa/README.maya44 index 0e41576fa13..67b2ea1cc31 100644 --- a/Documentation/sound/alsa/README.maya44 +++ b/Documentation/sound/alsa/README.maya44 @@ -120,7 +120,7 @@ Mic Phantom+48V: switch for +48V phantom power for electrostatic microphones on Make sure this is not turned on while any other source is connected to input 1/2. It might damage the source and/or the maya44 card. -Mic/Line input: if switch is is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo). +Mic/Line input: if switch is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo). Bypass: analogue bypass from ADC input to output for channel 1+2. Same as "Monitor" in the windows driver. Bypass 1: same for channel 3+4. diff --git a/Documentation/sound/alsa/compress_offload.txt b/Documentation/sound/alsa/compress_offload.txt index 90e9b3a11ab..630c492c3dc 100644 --- a/Documentation/sound/alsa/compress_offload.txt +++ b/Documentation/sound/alsa/compress_offload.txt @@ -73,7 +73,7 @@ The main requirements are: Design -The new API shares a number of concepts with with the PCM API for flow +The new API shares a number of concepts with the PCM API for flow control. Start, pause, resume, drain and stop commands have the same semantics no matter what the content is. @@ -130,7 +130,7 @@ the settings should remain the exception. The timestamp becomes a multiple field structure. It lists the number of bytes transferred, the number of samples processed and the number of samples rendered/grabbed. All these values can be used to determine -the avarage bitrate, figure out if the ring buffer needs to be +the average bitrate, figure out if the ring buffer needs to be refilled or the delay due to decoding/encoding/io on the DSP. Note that the list of codecs/profiles/modes was derived from the @@ -145,6 +145,52 @@ Modifications include: - Addition of encoding options when required (derived from OpenMAX IL) - Addition of rateControlSupported (missing in OpenMAX AL) +Gapless Playback +================ +When playing thru an album, the decoders have the ability to skip the encoder +delay and padding and directly move from one track content to another. The end +user can perceive this as gapless playback as we dont have silence while +switching from one track to another + +Also, there might be low-intensity noises due to encoding. Perfect gapless is +difficult to reach with all types of compressed data, but works fine with most +music content. The decoder needs to know the encoder delay and encoder padding. +So we need to pass this to DSP. This metadata is extracted from ID3/MP4 headers +and are not present by default in the bitstream, hence the need for a new +interface to pass this information to the DSP. Also DSP and userspace needs to +switch from one track to another and start using data for second track. + +The main additions are: + +- set_metadata +This routine sets the encoder delay and encoder padding. This can be used by +decoder to strip the silence. This needs to be set before the data in the track +is written. 
+ +- set_next_track +This routine tells DSP that metadata and write operation sent after this would +correspond to subsequent track + +- partial drain +This is called when end of file is reached. The userspace can inform DSP that +EOF is reached and now DSP can start skipping padding delay. Also next write +data would belong to next track + +Sequence flow for gapless would be: +- Open +- Get caps / codec caps +- Set params +- Set metadata of the first track +- Fill data of the first track +- Trigger start +- User-space finished sending all, +- Indicaite next track data by sending set_next_track +- Set metadata of the next track +- then call partial_drain to flush most of buffer in DSP +- Fill data of the next track +- DSP switches to second track +(note: order for partial_drain and write for next track can be reversed as well) + Not supported: - Support for VoIP/circuit-switched calls is not the target of this @@ -171,12 +217,12 @@ Not supported: would be enabled with ALSA kcontrols. - Audio policy/resource management. This API does not provide any - hooks to query the utilization of the audio DSP, nor any premption + hooks to query the utilization of the audio DSP, nor any preemption mechanisms. -- No notion of underun/overrun. Since the bytes written are compressed +- No notion of underrun/overrun. Since the bytes written are compressed in nature and data written/read doesn't translate directly to - rendered output in time, this does not deal with underrun/overun and + rendered output in time, this does not deal with underrun/overrun and maybe dealt in user-library Credits: diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html index d9776cf60c0..9663b45f6fd 100644 --- a/Documentation/sound/alsa/seq_oss.html +++ b/Documentation/sound/alsa/seq_oss.html @@ -285,7 +285,7 @@ sample data. <H4> 7.2.4 Close Callback</H4> The <TT>close</TT> callback is called when this device is closed by the -applicaion. If any private data was allocated in open callback, it must +application. If any private data was allocated in open callback, it must be released in the close callback. The deletion of ALSA port should be done here, too. This callback must not be NULL. <H4> diff --git a/Documentation/sound/alsa/soc/DPCM.txt b/Documentation/sound/alsa/soc/DPCM.txt new file mode 100644 index 00000000000..0110180b7ac --- /dev/null +++ b/Documentation/sound/alsa/soc/DPCM.txt @@ -0,0 +1,380 @@ +Dynamic PCM +=========== + +1. Description +============== + +Dynamic PCM allows an ALSA PCM device to digitally route its PCM audio to +various digital endpoints during the PCM stream runtime. e.g. PCM0 can route +digital audio to I2S DAI0, I2S DAI1 or PDM DAI2. This is useful for on SoC DSP +drivers that expose several ALSA PCMs and can route to multiple DAIs. + +The DPCM runtime routing is determined by the ALSA mixer settings in the same +way as the analog signal is routed in an ASoC codec driver. DPCM uses a DAPM +graph representing the DSP internal audio paths and uses the mixer settings to +determine the patch used by each ALSA PCM. + +DPCM re-uses all the existing component codec, platform and DAI drivers without +any modifications. + + +Phone Audio System with SoC based DSP +------------------------------------- + +Consider the following phone audio subsystem. 
This will be used in this +document for all examples :- + +| Front End PCMs | SoC DSP | Back End DAIs | Audio devices | + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +This diagram shows a simple smart phone audio subsystem. It supports Bluetooth, +FM digital radio, Speakers, Headset Jack, digital microphones and cellular +modem. This sound card exposes 4 DSP front end (FE) ALSA PCM devices and +supports 6 back end (BE) DAIs. Each FE PCM can digitally route audio data to any +of the BE DAIs. The FE PCM devices can also route audio to more than 1 BE DAI. + + + +Example - DPCM Switching playback from DAI0 to DAI1 +--------------------------------------------------- + +Audio is being played to the Headset. After a while the user removes the headset +and audio continues playing on the speakers. + +Playback on PCM0 to Headset would look like :- + + ************* +PCM0 <============> * * <====DAI0=====> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +The headset is removed from the jack by user so the speakers must now be used :- + + ************* +PCM0 <============> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <====DAI1=====> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +The audio driver processes this as follows :- + + 1) Machine driver receives Jack removal event. + + 2) Machine driver OR audio HAL disables the Headset path. + + 3) DPCM runs the PCM trigger(stop), hw_free(), shutdown() operations on DAI0 + for headset since the path is now disabled. + + 4) Machine driver or audio HAL enables the speaker path. + + 5) DPCM runs the PCM ops for startup(), hw_params(), prepapre() and + trigger(start) for DAI1 Speakers since the path is enabled. + +In this example, the machine driver or userspace audio HAL can alter the routing +and then DPCM will take care of managing the DAI PCM operations to either bring +the link up or down. Audio playback does not stop during this transition. + + + +DPCM machine driver +=================== + +The DPCM enabled ASoC machine driver is similar to normal machine drivers +except that we also have to :- + + 1) Define the FE and BE DAI links. + + 2) Define any FE/BE PCM operations. + + 3) Define widget graph connections. + + +1 FE and BE DAI links +--------------------- + +| Front End PCMs | SoC DSP | Back End DAIs | Audio devices | + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <----DAI2-----> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +For the example above we have to define 4 FE DAI links and 6 BE DAI links. 
The +FE DAI links are defined as follows :- + +static struct snd_soc_dai_link machine_dais[] = { + { + .name = "PCM0 System", + .stream_name = "System Playback", + .cpu_dai_name = "System Pin", + .platform_name = "dsp-audio", + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .dynamic = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, + .dpcm_playback = 1, + }, + .....< other FE and BE DAI links here > +}; + +This FE DAI link is pretty similar to a regular DAI link except that we also +set the DAI link to a DPCM FE with the "dynamic = 1". The supported FE stream +directions should also be set with the "dpcm_playback" and "dpcm_capture" +flags. There is also an option to specify the ordering of the trigger call for +each FE. This allows the ASoC core to trigger the DSP before or after the other +components (as some DSPs have strong requirements for the ordering DAI/DSP +start and stop sequences). + +The FE DAI above sets the codec and code DAIs to dummy devices since the BE is +dynamic and will change depending on runtime config. + +The BE DAIs are configured as follows :- + +static struct snd_soc_dai_link machine_dais[] = { + .....< FE DAI links here > + { + .name = "Codec Headset", + .cpu_dai_name = "ssp-dai.0", + .platform_name = "snd-soc-dummy", + .no_pcm = 1, + .codec_name = "rt5640.0-001c", + .codec_dai_name = "rt5640-aif1", + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .be_hw_params_fixup = hswult_ssp0_fixup, + .ops = &haswell_ops, + .dpcm_playback = 1, + .dpcm_capture = 1, + }, + .....< other BE DAI links here > +}; + +This BE DAI link connects DAI0 to the codec (in this case RT5460 AIF1). It sets +the "no_pcm" flag to mark it has a BE and sets flags for supported stream +directions using "dpcm_playback" and "dpcm_capture" above. + +The BE has also flags set for ignoring suspend and PM down time. This allows +the BE to work in a hostless mode where the host CPU is not transferring data +like a BT phone call :- + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <----DAI1-----> Codec Speakers + * DSP * +PCM2 <------------> * * <====DAI2=====> MODEM + * * +PCM3 <------------> * * <====DAI3=====> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +This allows the host CPU to sleep whilst the DSP, MODEM DAI and the BT DAI are +still in operation. + +A BE DAI link can also set the codec to a dummy device if the code is a device +that is managed externally. + +Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the +DSP firmware. + + +2 FE/BE PCM operations +---------------------- + +The BE above also exports some PCM operations and a "fixup" callback. The fixup +callback is used by the machine driver to (re)configure the DAI based upon the +FE hw params. i.e. the DSP may perform SRC or ASRC from the FE to BE. + +e.g. DSP converts all FE hw params to run at fixed rate of 48k, 16bit, stereo for +DAI0. This means all FE hw_params have to be fixed in the machine driver for +DAI0 so that the DAI is running at desired configuration regardless of the FE +configuration. 
+ +static int dai0_fixup(struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate = hw_param_interval(params, + SNDRV_PCM_HW_PARAM_RATE); + struct snd_interval *channels = hw_param_interval(params, + SNDRV_PCM_HW_PARAM_CHANNELS); + + /* The DSP will covert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set DAI0 to 16 bit */ + snd_mask_set(¶ms->masks[SNDRV_PCM_HW_PARAM_FORMAT - + SNDRV_PCM_HW_PARAM_FIRST_MASK], + SNDRV_PCM_FORMAT_S16_LE); + return 0; +} + +The other PCM operation are the same as for regular DAI links. Use as necessary. + + +3 Widget graph connections +-------------------------- + +The BE DAI links will normally be connected to the graph at initialisation time +by the ASoC DAPM core. However, if the BE codec or BE DAI is a dummy then this +has to be set explicitly in the driver :- + +/* BE for codec Headset - DAI0 is dummy and managed by DSP FW */ +{"DAI0 CODEC IN", NULL, "AIF1 Capture"}, +{"AIF1 Playback", NULL, "DAI0 CODEC OUT"}, + + +Writing a DPCM DSP driver +========================= + +The DPCM DSP driver looks much like a standard platform class ASoC driver +combined with elements from a codec class driver. A DSP platform driver must +implement :- + + 1) Front End PCM DAIs - i.e. struct snd_soc_dai_driver. + + 2) DAPM graph showing DSP audio routing from FE DAIs to BEs. + + 3) DAPM widgets from DSP graph. + + 4) Mixers for gains, routing, etc. + + 5) DMA configuration. + + 6) BE AIF widgets. + +Items 6 is important for routing the audio outside of the DSP. AIF need to be +defined for each BE and each stream direction. e.g for BE DAI0 above we would +have :- + +SND_SOC_DAPM_AIF_IN("DAI0 RX", NULL, 0, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_AIF_OUT("DAI0 TX", NULL, 0, SND_SOC_NOPM, 0, 0), + +The BE AIF are used to connect the DSP graph to the graphs for the other +component drivers (e.g. codec graph). + + +Hostless PCM streams +==================== + +A hostless PCM stream is a stream that is not routed through the host CPU. An +example of this would be a phone call from handset to modem. + + + ************* +PCM0 <------------> * * <----DAI0-----> Codec Headset + * * +PCM1 <------------> * * <====DAI1=====> Codec Speakers/Mic + * DSP * +PCM2 <------------> * * <====DAI2=====> MODEM + * * +PCM3 <------------> * * <----DAI3-----> BT + * * + * * <----DAI4-----> DMIC + * * + * * <----DAI5-----> FM + ************* + +In this case the PCM data is routed via the DSP. The host CPU in this use case +is only used for control and can sleep during the runtime of the stream. + +The host can control the hostless link either by :- + + 1) Configuring the link as a CODEC <-> CODEC style link. In this case the link + is enabled or disabled by the state of the DAPM graph. This usually means + there is a mixer control that can be used to connect or disconnect the path + between both DAIs. + + 2) Hostless FE. This FE has a virtual connection to the BE DAI links on the DAPM + graph. Control is then carried out by the FE as regular PCM operations. + This method gives more control over the DAI links, but requires much more + userspace code to control the link. Its recommended to use CODEC<->CODEC + unless your HW needs more fine grained sequencing of the PCM ops. + + +CODEC <-> CODEC link +-------------------- + +This DAI link is enabled when DAPM detects a valid path within the DAPM graph. +The machine driver sets some additional parameters to the DAI link i.e. 
+ +static const struct snd_soc_pcm_stream dai_params = { + .formats = SNDRV_PCM_FMTBIT_S32_LE, + .rate_min = 8000, + .rate_max = 8000, + .channels_min = 2, + .channels_max = 2, +}; + +static struct snd_soc_dai_link dais[] = { + < ... more DAI links above ... > + { + .name = "MODEM", + .stream_name = "MODEM", + .cpu_dai_name = "dai2", + .codec_dai_name = "modem-aif1", + .codec_name = "modem", + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF + | SND_SOC_DAIFMT_CBM_CFM, + .params = &dai_params, + } + < ... more DAI links here ... > + +These parameters are used to configure the DAI hw_params() when DAPM detects a +valid path and then calls the PCM operations to start the link. DAPM will also +call the appropriate PCM operations to disable the DAI when the path is no +longer valid. + + +Hostless FE +----------- + +The DAI link(s) are enabled by a FE that does not read or write any PCM data. +This means creating a new FE that is connected with a virtual path to both +DAI links. The DAI links will be started when the FE PCM is started and stopped +when the FE PCM is stopped. Note that the FE PCM cannot read or write data in +this configuration. + + diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/alsa/soc/codec.txt index bce23a4a787..db5f9c9ae14 100644 --- a/Documentation/sound/alsa/soc/codec.txt +++ b/Documentation/sound/alsa/soc/codec.txt @@ -1,22 +1,23 @@ -ASoC Codec Driver -================= +ASoC Codec Class Driver +======================= -The codec driver is generic and hardware independent code that configures the -codec to provide audio capture and playback. It should contain no code that is -specific to the target platform or machine. All platform and machine specific -code should be added to the platform and machine drivers respectively. +The codec class driver is generic and hardware independent code that configures +the codec, FM, MODEM, BT or external DSP to provide audio capture and playback. +It should contain no code that is specific to the target platform or machine. +All platform and machine specific code should be added to the platform and +machine drivers respectively. -Each codec driver *must* provide the following features:- +Each codec class driver *must* provide the following features:- 1) Codec DAI and PCM configuration - 2) Codec control IO - using I2C, 3 Wire(SPI) or both APIs + 2) Codec control IO - using RegMap API 3) Mixers and audio controls 4) Codec audio operations + 5) DAPM description. + 6) DAPM event handler. Optionally, codec drivers can also provide:- - 5) DAPM description. - 6) DAPM event handler. 7) DAC Digital mute control. Its probably best to use this guide in conjunction with the existing codec @@ -64,26 +65,9 @@ struct snd_soc_dai_driver wm8731_dai = { 2 - Codec control IO -------------------- The codec can usually be controlled via an I2C or SPI style interface -(AC97 combines control with data in the DAI). The codec drivers provide -functions to read and write the codec registers along with supplying a -register cache:- - - /* IO control data and register cache */ - void *control_data; /* codec control (i2c/3wire) data */ - void *reg_cache; - -Codec read/write should do any data formatting and call the hardware -read write below to perform the IO. 
These functions are called by the -core and ALSA when performing DAPM or changing the mixer:- - - unsigned int (*read)(struct snd_soc_codec *, unsigned int); - int (*write)(struct snd_soc_codec *, unsigned int, unsigned int); - -Codec hardware IO functions - usually points to either the I2C, SPI or AC97 -read/write:- - - hw_write_t hw_write; - hw_read_t hw_read; +(AC97 combines control with data in the DAI). The codec driver should use the +Regmap API for all codec IO. Please see include/linux/regmap.h and existing +codec drivers for example regmap usage. 3 - Mixers and audio controls @@ -127,7 +111,7 @@ Defines a stereo enumerated control 4 - Codec Audio Operations -------------------------- -The codec driver also supports the following ALSA operations:- +The codec driver also supports the following ALSA PCM operations:- /* SoC audio ops */ struct snd_soc_ops { diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt index 05bf5a0eee4..6faab488000 100644 --- a/Documentation/sound/alsa/soc/dapm.txt +++ b/Documentation/sound/alsa/soc/dapm.txt @@ -21,7 +21,7 @@ level power systems. There are 4 power domains within DAPM - 1. Codec domain - VREF, VMID (core codec and audio power) + 1. Codec bias domain - VREF, VMID (core codec and audio power) Usually controlled at codec probe/remove and suspend/resume, although can be set at stream time if power is not needed for sidetone, etc. @@ -30,7 +30,7 @@ There are 4 power domains within DAPM machine driver and responds to asynchronous events e.g when HP are inserted - 3. Path domain - audio susbsystem signal paths + 3. Path domain - audio subsystem signal paths Automatically set when mixer and mux settings are changed by the user. e.g. alsamixer, amixer. @@ -63,14 +63,22 @@ Audio DAPM widgets fall into a number of types:- o Line - Line Input/Output (and optional Jack) o Speaker - Speaker o Supply - Power or clock supply widget used by other widgets. + o Regulator - External regulator that supplies power to audio components. + o Clock - External clock that supplies clock to audio components. + o AIF IN - Audio Interface Input (with TDM slot mask). + o AIF OUT - Audio Interface Output (with TDM slot mask). + o Siggen - Signal Generator. + o DAI IN - Digital Audio Interface Input. + o DAI OUT - Digital Audio Interface Output. + o DAI Link - DAI Link between two DAI structures */ o Pre - Special PRE widget (exec before all others) o Post - Special POST widget (exec after all others) (Widgets are defined in include/sound/soc-dapm.h) -Widgets are usually added in the codec driver and the machine driver. There are -convenience macros defined in soc-dapm.h that can be used to quickly build a -list of widgets of the codecs and machines DAPM widgets. +Widgets can be added to the sound card by any of the component driver types. +There are convenience macros defined in soc-dapm.h that can be used to quickly +build a list of widgets of the codecs and machines DAPM widgets. Most widgets have a name, register, shift and invert. Some widgets have extra parameters for stream name and kcontrols. @@ -80,11 +88,13 @@ parameters for stream name and kcontrols. ------------------------- Stream Widgets relate to the stream power domain and only consist of ADCs -(analog to digital converters) and DACs (digital to analog converters). +(analog to digital converters), DACs (digital to analog converters), +AIF IN and AIF OUT. 
Stream widgets have the following format:- SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert), +SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert) NOTE: the stream name must match the corresponding stream name in your codec snd_soc_codec_dai. @@ -94,6 +104,11 @@ e.g. stream widgets for HiFi playback and capture SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", REG, 3, 1), SND_SOC_DAPM_ADC("HiFi ADC", "HiFi Capture", REG, 2, 1), +e.g. stream widgets for AIF + +SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0), + 2.2 Path Domain Widgets ----------------------- @@ -121,12 +136,14 @@ If you dont want the mixer elements prefixed with the name of the mixer widget, you can use SND_SOC_DAPM_MIXER_NAMED_CTL instead. the parameters are the same as for SND_SOC_DAPM_MIXER. -2.3 Platform/Machine domain Widgets ------------------------------------ + +2.3 Machine domain Widgets +-------------------------- Machine widgets are different from codec widgets in that they don't have a codec register bit associated with them. A machine widget is assigned to each -machine audio component (non codec) that can be independently powered. e.g. +machine audio component (non codec or DSP) that can be independently +powered. e.g. o Speaker Amp o Microphone Bias @@ -146,12 +163,12 @@ static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event) SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias), -2.4 Codec Domain ----------------- +2.4 Codec (BIAS) Domain +----------------------- -The codec power domain has no widgets and is handled by the codecs DAPM event -handler. This handler is called when the codec powerstate is changed wrt to any -stream event or by kernel PM events. +The codec bias power domain has no widgets and is handled by the codecs DAPM +event handler. This handler is called when the codec powerstate is changed wrt +to any stream event or by kernel PM events. 2.5 Virtual Widgets @@ -169,15 +186,16 @@ After all the widgets have been defined, they can then be added to the DAPM subsystem individually with a call to snd_soc_dapm_new_control(). -3. Codec Widget Interconnections -================================ +3. Codec/DSP Widget Interconnections +==================================== -Widgets are connected to each other within the codec and machine by audio paths -(called interconnections). Each interconnection must be defined in order to -create a map of all audio paths between widgets. +Widgets are connected to each other within the codec, platform and machine by +audio paths (called interconnections). Each interconnection must be defined in +order to create a map of all audio paths between widgets. -This is easiest with a diagram of the codec (and schematic of the machine audio -system), as it requires joining widgets together via their audio signal paths. +This is easiest with a diagram of the codec or DSP (and schematic of the machine +audio system), as it requires joining widgets together via their audio signal +paths. e.g., from the WM8731 output mixer (wm8731.c) @@ -247,16 +265,9 @@ machine and includes the codec. e.g. o Mic Jack o Codec Pins -When a codec pin is NC it can be marked as not used with a call to - -snd_soc_dapm_set_endpoint(codec, "Widget Name", 0); - -The last argument is 0 for inactive and 1 for active. This way the pin and its -input widget will never be powered up and consume power. - -This also applies to machine widgets. e.g. 
if a headphone is connected to a -jack then the jack can be marked active. If the headphone is removed, then -the headphone jack can be marked inactive. +Endpoints are added to the DAPM graph so that their usage can be determined in +order to save power. e.g. NC codecs pins will be switched OFF, unconnected +jacks can also be switched OFF. 5 DAPM Widget Events diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt index d50c14df341..74056dba52b 100644 --- a/Documentation/sound/alsa/soc/machine.txt +++ b/Documentation/sound/alsa/soc/machine.txt @@ -1,8 +1,10 @@ ASoC Machine Driver =================== -The ASoC machine (or board) driver is the code that glues together the platform -and codec drivers. +The ASoC machine (or board) driver is the code that glues together all the +component drivers (e.g. codecs, platforms and DAIs). It also describes the +relationships between each componnent which include audio paths, GPIOs, +interrupts, clocking, jacks and voltage regulators. The machine driver can contain codec and platform specific code. It registers the audio subsystem with the kernel as a platform device and is represented by diff --git a/Documentation/sound/alsa/soc/overview.txt b/Documentation/sound/alsa/soc/overview.txt index 138ac88c146..ff88f52eec9 100644 --- a/Documentation/sound/alsa/soc/overview.txt +++ b/Documentation/sound/alsa/soc/overview.txt @@ -49,18 +49,23 @@ features :- * Machine specific controls: Allow machines to add controls to the sound card (e.g. volume control for speaker amplifier). -To achieve all this, ASoC basically splits an embedded audio system into 3 -components :- +To achieve all this, ASoC basically splits an embedded audio system into +multiple re-usable component drivers :- - * Codec driver: The codec driver is platform independent and contains audio - controls, audio interface capabilities, codec DAPM definition and codec IO - functions. + * Codec class drivers: The codec class driver is platform independent and + contains audio controls, audio interface capabilities, codec DAPM + definition and codec IO functions. This class extends to BT, FM and MODEM + ICs if required. Codec class drivers should be generic code that can run + on any architecture and machine. - * Platform driver: The platform driver contains the audio DMA engine and audio - interface drivers (e.g. I2S, AC97, PCM) for that platform. + * Platform class drivers: The platform class driver includes the audio DMA + engine driver, digital audio interface (DAI) drivers (e.g. I2S, AC97, PCM) + and any audio DSP drivers for that platform. - * Machine driver: The machine driver handles any machine specific controls and - audio events (e.g. turning on an amp at start of playback). + * Machine class driver: The machine driver class acts as the glue that + decribes and binds the other component drivers together to form an ALSA + "sound card device". It handles any machine specific controls and + machine level audio events (e.g. turning on an amp at start of playback). Documentation @@ -84,3 +89,7 @@ machine.txt: Machine driver internals. pop_clicks.txt: How to minimise audio artifacts. clocking.txt: ASoC clocking for best power performance. + +jack.txt: ASoC jack detection. + +DPCM.txt: Dynamic PCM - Describes DPCM with DSP examples. 
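(To make the widget and interconnection description in the dapm.txt changes above more concrete, here is a small hand-written sketch, not taken from this patch or any real driver; the widget and route names are made up. It shows how a codec or DSP driver typically declares a DAC feeding an output mixer and a headphone endpoint:

#include <sound/soc.h>

/* Hypothetical widgets: a DAC, an analog output mixer and a headphone jack */
static const struct snd_soc_dapm_widget example_widgets[] = {
	SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_MIXER("Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
};

/* Routes are {sink, control, source}; a NULL control means a fixed connection */
static const struct snd_soc_dapm_route example_routes[] = {
	{"Output Mixer", NULL, "HiFi DAC"},
	{"Headphone Jack", NULL, "Output Mixer"},
};

In a real driver these arrays would be registered with the core, for example through the driver's dapm_widgets/dapm_routes fields or via snd_soc_dapm_new_controls() and snd_soc_dapm_add_routes().)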
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/alsa/soc/platform.txt index d57efad37e0..3a08a2c9150 100644 --- a/Documentation/sound/alsa/soc/platform.txt +++ b/Documentation/sound/alsa/soc/platform.txt @@ -1,9 +1,9 @@ ASoC Platform Driver ==================== -An ASoC platform driver can be divided into audio DMA and SoC DAI configuration -and control. The platform drivers only target the SoC CPU and must have no board -specific code. +An ASoC platform driver class can be divided into audio DMA drivers, SoC DAI +drivers and DSP drivers. The platform drivers only target the SoC CPU and must +have no board specific code. Audio DMA ========= @@ -64,3 +64,16 @@ Each SoC DAI driver must provide the following features:- 5) Suspend and resume (optional) Please see codec.txt for a description of items 1 - 4. + + +SoC DSP Drivers +=============== + +Each SoC DSP driver usually supplies the following features :- + + 1) DAPM graph + 2) Mixer controls + 3) DMA IO to/from DSP buffers (if applicable) + 4) Definition of DSP front end (FE) PCM devices. + +Please see DPCM.txt for a description of item 4. diff --git a/Documentation/sound/oss/vwsnd b/Documentation/sound/oss/vwsnd deleted file mode 100644 index 4c6cbdb3c54..00000000000 --- a/Documentation/sound/oss/vwsnd +++ /dev/null @@ -1,293 +0,0 @@ -vwsnd - Sound driver for the Silicon Graphics 320 and 540 Visual -Workstations' onboard audio. - -Copyright 1999 Silicon Graphics, Inc. All rights reserved. - - -At the time of this writing, March 1999, there are two models of -Visual Workstation, the 320 and the 540. This document only describes -those models. Future Visual Workstation models may have different -sound capabilities, and this driver will probably not work on those -boxes. - -The Visual Workstation has an Analog Devices AD1843 "SoundComm" audio -codec chip. The AD1843 is accessed through the Cobalt I/O ASIC, also -known as Lithium. This driver programs both chips. - -============================================================================== -QUICK CONFIGURATION - - # insmod soundcore - # insmod vwsnd - -============================================================================== -I/O CONNECTIONS - -On the Visual Workstation, only three of the AD1843 inputs are hooked -up. The analog line in jacks are connected to the AD1843's AUX1 -input. The CD audio lines are connected to the AD1843's AUX2 input. -The microphone jack is connected to the AD1843's MIC input. The mic -jack is mono, but the signal is delivered to both the left and right -MIC inputs. You can record in stereo from the mic input, but you will -get the same signal on both channels (within the limits of A/D -accuracy). Full scale on the Line input is +/- 2.0 V. Full scale on -the MIC input is 20 dB less, or +/- 0.2 V. - -The AD1843's LOUT1 outputs are connected to the Line Out jacks. The -AD1843's HPOUT outputs are connected to the speaker/headphone jack. -LOUT2 is not connected. Line out's maximum level is +/- 2.0 V peak to -peak. The speaker/headphone out's maximum is +/- 4.0 V peak to peak. - -The AD1843's PCM input channel and one of its output channels (DAC1) -are connected to Lithium. The other output channel (DAC2) is not -connected. - -============================================================================== -CAPABILITIES - -The AD1843 has PCM input and output (Pulse Code Modulation, also known -as wavetable). PCM input and output can be mono or stereo in any of -four formats. 
The formats are 16 bit signed and 8 bit unsigned, -u-Law, and A-Law format. Any sample rate from 4 KHz to 49 KHz is -available, in 1 Hz increments. - -The AD1843 includes an analog mixer that can mix all three input -signals (line, mic and CD) into the analog outputs. The mixer has a -separate gain control and mute switch for each input. - -There are two outputs, line out and speaker/headphone out. They -always produce the same signal, and the speaker always has 3 dB more -gain than the line out. The speaker/headphone output can be muted, -but this driver does not export that function. - -The hardware can sync audio to the video clock, but this driver does -not have a way to specify syncing to video. - -============================================================================== -PROGRAMMING - -This section explains the API supported by the driver. Also see the -Open Sound Programming Guide at http://www.opensound.com/pguide/ . -This section assumes familiarity with that document. - -The driver has two interfaces, an I/O interface and a mixer interface. -There is no MIDI or sequencer capability. - -============================================================================== -PROGRAMMING PCM I/O - -The I/O interface is usually accessed as /dev/audio or /dev/dsp. -Using the standard Open Sound System (OSS) ioctl calls, the sample -rate, number of channels, and sample format may be set within the -limitations described above. The driver supports triggering. It also -supports getting the input and output pointers with one-sample -accuracy. - -The SNDCTL_DSP_GETCAP ioctl returns these capabilities. - - DSP_CAP_DUPLEX - driver supports full duplex. - - DSP_CAP_TRIGGER - driver supports triggering. - - DSP_CAP_REALTIME - values returned by SNDCTL_DSP_GETIPTR - and SNDCTL_DSP_GETOPTR are accurate to a few samples. - -Memory mapping (mmap) is not implemented. - -The driver permits subdivided fragment sizes from 64 to 4096 bytes. -The number of fragments can be anything from 3 fragments to however -many fragments fit into 124 kilobytes. It is up to the user to -determine how few/small fragments can be used without introducing -glitches with a given workload. Linux is not realtime, so we can't -promise anything. (sigh...) - -When this driver is switched into or out of mu-Law or A-Law mode on -output, it may produce an audible click. This is unavoidable. To -prevent clicking, use signed 16-bit mode instead, and convert from -mu-Law or A-Law format in software. - -============================================================================== -PROGRAMMING THE MIXER INTERFACE - -The mixer interface is usually accessed as /dev/mixer. It is accessed -through ioctls. The mixer allows the application to control gain or -mute several audio signal paths, and also allows selection of the -recording source. - -Each of the constants described here can be read using the -MIXER_READ(SOUND_MIXER_xxx) ioctl. Those that are not read-only can -also be written using the MIXER_WRITE(SOUND_MIXER_xxx) ioctl. In most -cases, <sys/soundcard.h> defines constants SOUND_MIXER_READ_xxx and -SOUND_MIXER_WRITE_xxx which work just as well. - -SOUND_MIXER_CAPS Read-only - -This is a mask of optional driver capabilities that are implemented. -This driver's only capability is SOUND_CAP_EXCL_INPUT, which means -that only one recording source can be active at a time. - -SOUND_MIXER_DEVMASK Read-only - -This is a mask of the sound channels. This driver's channels are PCM, -LINE, MIC, CD, and RECLEV. 
- -SOUND_MIXER_STEREODEVS Read-only - -This is a mask of which sound channels are capable of stereo. All -channels are capable of stereo. (But see caveat on MIC input in I/O -CONNECTIONS section above). - -SOUND_MIXER_OUTMASK Read-only - -This is a mask of channels that route inputs through to outputs. -Those are LINE, MIC, and CD. - -SOUND_MIXER_RECMASK Read-only - -This is a mask of channels that can be recording sources. Those are -PCM, LINE, MIC, CD. - -SOUND_MIXER_PCM Default: 0x5757 (0 dB) - -This is the gain control for PCM output. The left and right channel -gain are controlled independently. This gain control has 64 levels, -which range from -82.5 dB to +12.0 dB in 1.5 dB steps. Those 64 -levels are mapped onto 100 levels at the ioctl, see below. - -SOUND_MIXER_LINE Default: 0x4a4a (0 dB) - -This is the gain control for mixing the Line In source into the -outputs. The left and right channel gain are controlled -independently. This gain control has 32 levels, which range from --34.5 dB to +12.0 dB in 1.5 dB steps. Those 32 levels are mapped onto -100 levels at the ioctl, see below. - -SOUND_MIXER_MIC Default: 0x4a4a (0 dB) - -This is the gain control for mixing the MIC source into the outputs. -The left and right channel gain are controlled independently. This -gain control has 32 levels, which range from -34.5 dB to +12.0 dB in -1.5 dB steps. Those 32 levels are mapped onto 100 levels at the -ioctl, see below. - -SOUND_MIXER_CD Default: 0x4a4a (0 dB) - -This is the gain control for mixing the CD audio source into the -outputs. The left and right channel gain are controlled -independently. This gain control has 32 levels, which range from --34.5 dB to +12.0 dB in 1.5 dB steps. Those 32 levels are mapped onto -100 levels at the ioctl, see below. - -SOUND_MIXER_RECLEV Default: 0 (0 dB) - -This is the gain control for PCM input (RECording LEVel). The left -and right channel gain are controlled independently. This gain -control has 16 levels, which range from 0 dB to +22.5 dB in 1.5 dB -steps. Those 16 levels are mapped onto 100 levels at the ioctl, see -below. - -SOUND_MIXER_RECSRC Default: SOUND_MASK_LINE - -This is a mask of currently selected PCM input sources (RECording -SouRCes). Because the AD1843 can only have a single recording source -at a time, only one bit at a time can be set in this mask. The -allowable values are SOUND_MASK_PCM, SOUND_MASK_LINE, SOUND_MASK_MIC, -or SOUND_MASK_CD. Selecting SOUND_MASK_PCM sets up internal -resampling which is useful for loopback testing and for hardware -sample rate conversion. But software sample rate conversion is -probably faster, so I don't know how useful that is. - -SOUND_MIXER_OUTSRC DEFAULT: SOUND_MASK_LINE|SOUND_MASK_MIC|SOUND_MASK_CD - -This is a mask of sources that are currently passed through to the -outputs. Those sources whose bits are not set are muted. - -============================================================================== -GAIN CONTROL - -There are five gain controls listed above. Each has 16, 32, or 64 -steps. Each control has 1.5 dB of gain per step. Each control is -stereo. - -The OSS defines the argument to a channel gain ioctl as having two -components, left and right, each of which ranges from 0 to 100. The -two components are packed into the same word, with the left side gain -in the least significant byte, and the right side gain in the second -least significant byte. In C, we would say this. - - #include <assert.h> - - ... 
- - assert(leftgain >= 0 && leftgain <= 100); - assert(rightgain >= 0 && rightgain <= 100); - arg = leftgain | rightgain << 8; - -So each OSS gain control has 101 steps. But the hardware has 16, 32, -or 64 steps. The hardware steps are spread across the 101 OSS steps -nearly evenly. The conversion formulas are like this, given N equals -16, 32, or 64. - - int round = N/2 - 1; - OSS_gain_steps = (hw_gain_steps * 100 + round) / (N - 1); - hw_gain_steps = (OSS_gain_steps * (N - 1) + round) / 100; - -Here is a snippet of C code that will return the left and right gain -of any channel in dB. Pass it one of the predefined gain_desc_t -structures to access any of the five channels' gains. - - typedef struct gain_desc { - float min_gain; - float gain_step; - int nbits; - int chan; - } gain_desc_t; - - const gain_desc_t gain_pcm = { -82.5, 1.5, 6, SOUND_MIXER_PCM }; - const gain_desc_t gain_line = { -34.5, 1.5, 5, SOUND_MIXER_LINE }; - const gain_desc_t gain_mic = { -34.5, 1.5, 5, SOUND_MIXER_MIC }; - const gain_desc_t gain_cd = { -34.5, 1.5, 5, SOUND_MIXER_CD }; - const gain_desc_t gain_reclev = { 0.0, 1.5, 4, SOUND_MIXER_RECLEV }; - - int get_gain_dB(int fd, const gain_desc_t *gp, - float *left, float *right) - { - int word; - int lg, rg; - int mask = (1 << gp->nbits) - 1; - - if (ioctl(fd, MIXER_READ(gp->chan), &word) != 0) - return -1; /* fail */ - lg = word & 0xFF; - rg = word >> 8 & 0xFF; - lg = (lg * mask + mask / 2) / 100; - rg = (rg * mask + mask / 2) / 100; - *left = gp->min_gain + gp->gain_step * lg; - *right = gp->min_gain + gp->gain_step * rg; - return 0; - } - -And here is the corresponding routine to set a channel's gain in dB. - - int set_gain_dB(int fd, const gain_desc_t *gp, float left, float right) - { - float max_gain = - gp->min_gain + (1 << gp->nbits) * gp->gain_step; - float round = gp->gain_step / 2; - int mask = (1 << gp->nbits) - 1; - int word; - int lg, rg; - - if (left < gp->min_gain || right < gp->min_gain) - return EINVAL; - lg = (left - gp->min_gain + round) / gp->gain_step; - rg = (right - gp->min_gain + round) / gp->gain_step; - if (lg >= (1 << gp->nbits) || rg >= (1 << gp->nbits)) - return EINVAL; - lg = (100 * lg + mask / 2) / mask; - rg = (100 * rg + mask / 2) / mask; - word = lg | rg << 8; - - return ioctl(fd, MIXER_WRITE(gp->chan), &word); - } - |
