// go-ts/pkg/audio/playback.go
package audio

import (
	"encoding/binary"
	"fmt"
	"sync"
	"time"
	"unsafe"

	"github.com/go-ole/go-ole"
	"github.com/moutend/go-wca/pkg/wca"
)
// Player handles WASAPI audio playback.
type Player struct {
	client       *wca.IAudioClient
	renderClient *wca.IAudioRenderClient
	waveFormat   *wca.WAVEFORMATEX
	bufferSize   uint32
	volume       float32
	muted        bool
	mu           sync.Mutex
	running      bool
	stopChan     chan struct{}

	// Audio buffer - accumulates incoming audio until full frames are available.
	audioBuffer []int16
	bufferMu    sync.Mutex

	// Frame queue (960 samples = 20ms at 48kHz).
	frameQueue chan []int16
}

const frameSamples = 960 // 20ms at 48kHz
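
// Example usage (a minimal sketch; it assumes the caller produces 48kHz mono
// 16-bit PCM elsewhere, e.g. from an Opus decoder, and handles errors with
// log.Fatal only for brevity):
//
//	player, err := NewPlayer()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer player.Close()
//	if err := player.Start(); err != nil {
//		log.Fatal(err)
//	}
//	player.PlayPCM(pcm) // any length; framing into 960-sample frames is internal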
// NewPlayer creates a new WASAPI audio player. COM apartment initialization
// is per-thread, so callers that care about COM threading may want to call
// this (and Close) from a goroutine pinned with runtime.LockOSThread.
func NewPlayer() (*Player, error) {
	// Initialize COM using go-ole. CoInitializeEx returns a non-nil error
	// (S_FALSE) if COM is already initialized on this thread, which is safe
	// to ignore.
	if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED); err != nil {
		// Ignore: COM is likely already initialized on this thread.
	}

	// Get the default audio endpoint.
	var deviceEnumerator *wca.IMMDeviceEnumerator
	if err := wca.CoCreateInstance(
		wca.CLSID_MMDeviceEnumerator,
		0,
		wca.CLSCTX_ALL,
		wca.IID_IMMDeviceEnumerator,
		&deviceEnumerator,
	); err != nil {
		return nil, fmt.Errorf("failed to create device enumerator: %w", err)
	}
	defer deviceEnumerator.Release()

	var device *wca.IMMDevice
	if err := deviceEnumerator.GetDefaultAudioEndpoint(wca.ERender, wca.EConsole, &device); err != nil {
		return nil, fmt.Errorf("failed to get default render device: %w", err)
	}
	defer device.Release()

	// Activate the audio client.
	var audioClient *wca.IAudioClient
	if err := device.Activate(wca.IID_IAudioClient, wca.CLSCTX_ALL, nil, &audioClient); err != nil {
		return nil, fmt.Errorf("failed to activate audio client: %w", err)
	}

	// Set up the format for 48kHz mono 16-bit PCM (TeamSpeak format).
	// NBlockAlign = NChannels * WBitsPerSample/8 = 2 bytes per frame;
	// NAvgBytesPerSec = NSamplesPerSec * NBlockAlign = 96000.
	waveFormat := &wca.WAVEFORMATEX{
		WFormatTag:      wca.WAVE_FORMAT_PCM,
		NChannels:       1,
		NSamplesPerSec:  48000,
		WBitsPerSample:  16,
		NBlockAlign:     2,
		NAvgBytesPerSec: 96000,
		CbSize:          0,
	}

	// Initialize in shared mode with a 100ms buffer. The AUTOCONVERTPCM flag
	// lets WASAPI convert/resample to the engine mix format.
	duration := wca.REFERENCE_TIME(100 * 10000) // 100ms in 100-nanosecond units
	if err := audioClient.Initialize(
		wca.AUDCLNT_SHAREMODE_SHARED,
		wca.AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM|wca.AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY,
		duration,
		0,
		waveFormat,
		nil,
	); err != nil {
		audioClient.Release()
		return nil, fmt.Errorf("failed to initialize audio client: %w", err)
	}

	// Get the allocated buffer size (in frames).
	var bufferSize uint32
	if err := audioClient.GetBufferSize(&bufferSize); err != nil {
		audioClient.Release()
		return nil, fmt.Errorf("failed to get buffer size: %w", err)
	}

	// Get the render client used to write samples into the buffer.
	var renderClient *wca.IAudioRenderClient
	if err := audioClient.GetService(wca.IID_IAudioRenderClient, &renderClient); err != nil {
		audioClient.Release()
		return nil, fmt.Errorf("failed to get render client: %w", err)
	}

	return &Player{
		client:       audioClient,
		renderClient: renderClient,
		waveFormat:   waveFormat,
		bufferSize:   bufferSize,
		volume:       1.0,
		muted:        false,
		stopChan:     make(chan struct{}),
		audioBuffer:  make([]int16, 0, frameSamples*50), // ~1 second of audio
		frameQueue:   make(chan []int16, 100),           // ~2 seconds of frames
	}, nil
}
// Start begins audio playback.
func (p *Player) Start() error {
	p.mu.Lock()
	if p.running {
		p.mu.Unlock()
		return nil
	}
	p.running = true
	// Recreate the stop channel so the player can be restarted after Stop,
	// which closes the previous channel.
	p.stopChan = make(chan struct{})
	p.mu.Unlock()

	if err := p.client.Start(); err != nil {
		return fmt.Errorf("failed to start audio client: %w", err)
	}

	// The playback loop writes frames from the queue to WASAPI.
	go p.playbackLoop()
	return nil
}
// Stop stops audio playback.
func (p *Player) Stop() {
	p.mu.Lock()
	if !p.running {
		p.mu.Unlock()
		return
	}
	p.running = false
	p.mu.Unlock()

	close(p.stopChan)
	p.client.Stop()
}
// Close releases all resources.
func (p *Player) Close() {
	p.Stop()
	if p.renderClient != nil {
		p.renderClient.Release()
	}
	if p.client != nil {
		p.client.Release()
	}
	ole.CoUninitialize()
}
// PlayPCM queues PCM audio for playback. It accumulates samples and queues
// complete 960-sample frames.
func (p *Player) PlayPCM(samples []int16) {
	// Snapshot the volume and mute state under the lock; they are written
	// by SetVolume/SetMuted from other goroutines.
	p.mu.Lock()
	muted := p.muted
	volume := p.volume
	p.mu.Unlock()
	if muted {
		return
	}

	// Apply volume.
	adjusted := samples
	if volume != 1.0 {
		adjusted = make([]int16, len(samples))
		for i, s := range samples {
			adjusted[i] = int16(float32(s) * volume)
		}
	}

	p.bufferMu.Lock()
	p.audioBuffer = append(p.audioBuffer, adjusted...)
	// Queue complete 960-sample frames.
	for len(p.audioBuffer) >= frameSamples {
		frame := make([]int16, frameSamples)
		copy(frame, p.audioBuffer[:frameSamples])
		p.audioBuffer = p.audioBuffer[frameSamples:]
		select {
		case p.frameQueue <- frame:
		default:
			// Queue full: drop the oldest frame to make room, keeping
			// latency bounded.
			select {
			case <-p.frameQueue:
			default:
			}
			p.frameQueue <- frame
		}
	}
	p.bufferMu.Unlock()
}
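
// For example, feeding PlayPCM 480-sample (10ms) chunks queues one 960-sample
// frame for every two calls; any remainder stays in audioBuffer until a later
// call delivers enough samples to complete the next frame.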
// SetVolume sets playback volume (0.0 to 1.0).
func (p *Player) SetVolume(vol float32) {
	if vol < 0 {
		vol = 0
	}
	if vol > 1.0 {
		vol = 1.0
	}
	p.mu.Lock()
	p.volume = vol
	p.mu.Unlock()
}

// GetVolume returns the current volume (0.0 to 1.0).
func (p *Player) GetVolume() float32 {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.volume
}

// SetMuted sets the mute state.
func (p *Player) SetMuted(muted bool) {
	p.mu.Lock()
	p.muted = muted
	p.mu.Unlock()
}

// IsMuted returns the mute state.
func (p *Player) IsMuted() bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.muted
}
func (p *Player) playbackLoop() {
	// Use a 20ms ticker matching the TeamSpeak frame rate.
	ticker := time.NewTicker(20 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-p.stopChan:
			return
		case <-ticker.C:
			p.writeFrame()
		}
	}
}
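
// Design note: the loop polls on a timer rather than using WASAPI's
// event-driven mode (AUDCLNT_STREAMFLAGS_EVENTCALLBACK together with
// IAudioClient::SetEventHandle), which would wake the loop exactly when the
// device wants more data. With a 100ms shared-mode buffer, a 20ms tick
// leaves ample slack before an underrun.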
func (p *Player) writeFrame() {
	// Get the current padding (frames already queued in the device buffer).
	var padding uint32
	if err := p.client.GetCurrentPadding(&padding); err != nil {
		return
	}
	available := p.bufferSize - padding
	// For mono audio one frame is one sample, so frameSamples doubles as a
	// frame count here.
	if available < frameSamples {
		return // not enough space for a full frame
	}

	// Try to get a frame from the queue.
	select {
	case frame := <-p.frameQueue:
		var buffer *byte
		if err := p.renderClient.GetBuffer(frameSamples, &buffer); err != nil {
			return
		}
		// Write the frame to the WASAPI buffer as little-endian 16-bit PCM.
		bufSlice := unsafe.Slice(buffer, frameSamples*2)
		for i := 0; i < frameSamples; i++ {
			binary.LittleEndian.PutUint16(bufSlice[i*2:], uint16(frame[i]))
		}
		p.renderClient.ReleaseBuffer(frameSamples, 0)
	default:
		// No audio available - optionally write silence
		// (skipped for now to avoid crackling)
	}
}
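
// A minimal sketch of the silence option mentioned in writeFrame above; this
// hypothetical helper is not wired in anywhere. It relies on the WASAPI
// AUDCLNT_BUFFERFLAGS_SILENT flag (value 0x2), which tells the engine to
// render the released buffer as silence regardless of its contents.
func (p *Player) writeSilence() {
	const audclntBufferFlagsSilent = 0x2 // AUDCLNT_BUFFERFLAGS_SILENT
	var buffer *byte
	if err := p.renderClient.GetBuffer(frameSamples, &buffer); err != nil {
		return
	}
	p.renderClient.ReleaseBuffer(frameSamples, audclntBufferFlagsSilent)
}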