feat: Implement TeamSpeak 3 client with connection management, event handling, and audio support.

Jose Luis Montañes Ojados
2026-01-16 22:11:58 +01:00
parent c55ace0c00
commit f83f525600
7 changed files with 877 additions and 27 deletions

pkg/audio/capture.go (new file, +261)

@@ -0,0 +1,261 @@
package audio
import (
"encoding/binary"
"fmt"
"sync"
"time"
"unsafe"
"github.com/go-ole/go-ole"
"github.com/moutend/go-wca/pkg/wca"
)
const captureFrameSamples = 960 // 20ms at 48kHz
// Capturer handles WASAPI audio capture from microphone
type Capturer struct {
client *wca.IAudioClient
captureClient *wca.IAudioCaptureClient
waveFormat *wca.WAVEFORMATEX
bufferSize uint32
running bool
mu sync.Mutex
stopChan chan struct{}
// Callback for captured audio (called with 960-sample frames)
onAudio func(samples []int16)
// Sample accumulation buffer
sampleBuffer []int16
bufferMu sync.Mutex
// Current audio level (0-100)
currentLevel int
levelMu sync.RWMutex
}
// NewCapturer creates a new WASAPI audio capturer
func NewCapturer() (*Capturer, error) {
// Initialize COM (may already be initialized)
ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED)
// Get default capture endpoint (microphone)
var deviceEnumerator *wca.IMMDeviceEnumerator
if err := wca.CoCreateInstance(
wca.CLSID_MMDeviceEnumerator,
0,
wca.CLSCTX_ALL,
wca.IID_IMMDeviceEnumerator,
&deviceEnumerator,
); err != nil {
return nil, fmt.Errorf("failed to create device enumerator: %w", err)
}
defer deviceEnumerator.Release()
var device *wca.IMMDevice
if err := deviceEnumerator.GetDefaultAudioEndpoint(wca.ECapture, wca.EConsole, &device); err != nil {
return nil, fmt.Errorf("failed to get default capture device: %w", err)
}
defer device.Release()
// Activate audio client
var audioClient *wca.IAudioClient
if err := device.Activate(wca.IID_IAudioClient, wca.CLSCTX_ALL, nil, &audioClient); err != nil {
return nil, fmt.Errorf("failed to activate audio client: %w", err)
}
// Set up format for 48kHz mono 16-bit (TeamSpeak format)
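// NBlockAlign = NChannels * WBitsPerSample / 8 = 1 * 16 / 8 = 2 bytes per frame;
// NAvgBytesPerSec = NSamplesPerSec * NBlockAlign = 48000 * 2 = 96000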
waveFormat := &wca.WAVEFORMATEX{
WFormatTag: wca.WAVE_FORMAT_PCM,
NChannels: 1,
NSamplesPerSec: 48000,
WBitsPerSample: 16,
NBlockAlign: 2,
NAvgBytesPerSec: 96000,
CbSize: 0,
}
// Initialize in shared mode - 100ms buffer
duration := wca.REFERENCE_TIME(100 * 10000) // 100ms in 100-nanosecond units
if err := audioClient.Initialize(
wca.AUDCLNT_SHAREMODE_SHARED,
wca.AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM|wca.AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY,
duration,
0,
waveFormat,
nil,
); err != nil {
audioClient.Release()
return nil, fmt.Errorf("failed to initialize audio client: %w", err)
}
// Get buffer size
var bufferSize uint32
if err := audioClient.GetBufferSize(&bufferSize); err != nil {
audioClient.Release()
return nil, fmt.Errorf("failed to get buffer size: %w", err)
}
// Get capture client
var captureClient *wca.IAudioCaptureClient
if err := audioClient.GetService(wca.IID_IAudioCaptureClient, &captureClient); err != nil {
audioClient.Release()
return nil, fmt.Errorf("failed to get capture client: %w", err)
}
return &Capturer{
client: audioClient,
captureClient: captureClient,
waveFormat: waveFormat,
bufferSize: bufferSize,
stopChan: make(chan struct{}),
sampleBuffer: make([]int16, 0, captureFrameSamples*50), // ~1 second buffer
}, nil
}
// SetCallback sets the callback for captured audio (receives 960-sample frames)
func (c *Capturer) SetCallback(fn func(samples []int16)) {
c.mu.Lock()
c.onAudio = fn
c.mu.Unlock()
}
// Start begins audio capture
func (c *Capturer) Start() error {
c.mu.Lock()
if c.running {
c.mu.Unlock()
return nil
}
c.running = true
c.stopChan = make(chan struct{}) // Recreate channel for each start
c.mu.Unlock()
if err := c.client.Start(); err != nil {
c.mu.Lock()
c.running = false // Roll back so a later Start can retry
c.mu.Unlock()
return fmt.Errorf("failed to start audio client: %w", err)
}
go c.captureLoop()
return nil
}
// Stop stops audio capture
func (c *Capturer) Stop() {
c.mu.Lock()
if !c.running {
c.mu.Unlock()
return
}
c.running = false
c.mu.Unlock()
close(c.stopChan)
c.client.Stop()
}
// Close releases all resources
func (c *Capturer) Close() {
c.Stop()
if c.captureClient != nil {
c.captureClient.Release()
}
if c.client != nil {
c.client.Release()
}
}
// GetLevel returns the current audio input level (0-100)
func (c *Capturer) GetLevel() int {
c.levelMu.RLock()
defer c.levelMu.RUnlock()
return c.currentLevel
}
// IsRunning returns whether capture is active
func (c *Capturer) IsRunning() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.running
}
func (c *Capturer) captureLoop() {
ticker := time.NewTicker(10 * time.Millisecond) // Check more often than 20ms
defer ticker.Stop()
for {
select {
case <-c.stopChan:
return
case <-ticker.C:
c.readFromBuffer()
}
}
}
func (c *Capturer) readFromBuffer() {
// Read all available packets
for {
var packetLength uint32
if err := c.captureClient.GetNextPacketSize(&packetLength); err != nil {
return
}
if packetLength == 0 {
break
}
var buffer *byte
var numFrames uint32
var flags uint32
if err := c.captureClient.GetBuffer(&buffer, &numFrames, &flags, nil, nil); err != nil {
return
}
if numFrames == 0 {
c.captureClient.ReleaseBuffer(numFrames)
continue
}
samples := make([]int16, numFrames)
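// Mono 16-bit PCM: one sample per frame, two bytes per sample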
bufSlice := unsafe.Slice(buffer, numFrames*2)
for i := uint32(0); i < numFrames; i++ {
samples[i] = int16(binary.LittleEndian.Uint16(bufSlice[i*2:]))
}
c.captureClient.ReleaseBuffer(numFrames)
// Skip silent buffers
if flags&wca.AUDCLNT_BUFFERFLAGS_SILENT != 0 {
continue
}
// Add to sample buffer
c.bufferMu.Lock()
c.sampleBuffer = append(c.sampleBuffer, samples...)
// Calculate level from latest samples
level := CalculateRMSLevel(samples)
c.levelMu.Lock()
c.currentLevel = level
c.levelMu.Unlock()
// Send complete 960-sample frames
for len(c.sampleBuffer) >= captureFrameSamples {
frame := make([]int16, captureFrameSamples)
copy(frame, c.sampleBuffer[:captureFrameSamples])
c.sampleBuffer = c.sampleBuffer[captureFrameSamples:]
// Call callback with the frame
c.mu.Lock()
callback := c.onAudio
c.mu.Unlock()
if callback != nil {
callback(frame)
}
}
c.bufferMu.Unlock()
}
}
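
For orientation, a minimal sketch of how this capturer is driven (not part of the commit): construct it, register a 20ms-frame callback, start it, and poll the level meter. The module import path and the sendVoicePacket helper are hypothetical stand-ins for the real encode-and-transmit path.

package main

import (
	"fmt"
	"time"

	"example.com/tsclient/pkg/audio" // hypothetical module path
)

func main() {
	mic, err := audio.NewCapturer()
	if err != nil {
		panic(err)
	}
	defer mic.Close()

	// Every callback delivers exactly 960 samples (20ms at 48kHz)
	mic.SetCallback(func(samples []int16) {
		sendVoicePacket(samples) // hypothetical: encode (e.g. Opus) and transmit
	})

	if err := mic.Start(); err != nil {
		panic(err)
	}
	defer mic.Stop()

	// Print the input level once per second for five seconds
	for i := 0; i < 5; i++ {
		time.Sleep(time.Second)
		fmt.Printf("mic: %s (%d)\n", audio.LevelToBar(mic.GetLevel(), 10), mic.GetLevel())
	}
}

func sendVoicePacket(samples []int16) {
	_ = samples // placeholder for the real network path
}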

pkg/audio/level.go (new file, +100)

@@ -0,0 +1,100 @@
package audio
import (
"math"
"strings"
)
// CalculateRMSLevel calculates the RMS level of PCM samples and returns 0-100
func CalculateRMSLevel(samples []int16) int {
if len(samples) == 0 {
return 0
}
var sum float64
for _, s := range samples {
sum += float64(s) * float64(s)
}
rms := math.Sqrt(sum / float64(len(samples)))
// Normalize to 0-100 (max int16 is 32767)
level := int(rms / 32767.0 * 100.0)
if level > 100 {
level = 100
}
return level
}
// CalculatePeakLevel returns the peak level of PCM samples as 0-100
func CalculatePeakLevel(samples []int16) int {
if len(samples) == 0 {
return 0
}
// Track the peak as int so that negating math.MinInt16 cannot overflow
var peak int
for _, s := range samples {
v := int(s)
if v < 0 {
v = -v
}
if v > peak {
peak = v
}
}
return int(float64(peak) / 32767.0 * 100.0)
}
// LevelToBar converts a 0-100 level to a visual bar string
func LevelToBar(level, width int) string {
if level < 0 {
level = 0
}
if level > 100 {
level = 100
}
filled := level * width / 100
empty := width - filled
bar := ""
for i := 0; i < filled; i++ {
bar += "█"
}
for i := 0; i < empty; i++ {
bar += "░"
}
return bar
}
// LevelToMeter converts a 0-100 level to a visual VU meter with varying heights
func LevelToMeter(level, width int) string {
if level < 0 {
level = 0
}
if level > 100 {
level = 100
}
// Use block characters of varying heights
blocks := []rune{'░', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█'}
meter := ""
for i := 0; i < width; i++ {
// Each position represents a portion of the level
threshold := (i + 1) * 100 / width
if level >= threshold {
meter += string(blocks[8]) // Full
} else if level >= threshold-10 {
// Partial - calculate which block to use
partial := (level - (threshold - 10)) * 8 / 10
if partial < 0 {
partial = 0
}
meter += string(blocks[partial])
} else {
meter += string(blocks[0]) // Empty
}
}
return meter
}
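
A quick sanity check on the 0-100 scaling (an assumed example, not part of the commit; the import path is hypothetical): a full-scale square wave has RMS equal to its peak and reads 100, while a full-scale sine has RMS of peak/√2 and reads about 70.

package main

import (
	"fmt"
	"math"

	"example.com/tsclient/pkg/audio" // hypothetical module path
)

func main() {
	// Full-scale square wave: RMS == peak == 32767, so the level is 100
	square := make([]int16, 960)
	for i := range square {
		square[i] = 32767
	}
	fmt.Println(audio.CalculateRMSLevel(square)) // 100

	// Full-scale 1kHz sine sampled at 48kHz: RMS = 32767/sqrt(2), level ~70
	sine := make([]int16, 960)
	for i := range sine {
		sine[i] = int16(32767 * math.Sin(2*math.Pi*1000*float64(i)/48000))
	}
	fmt.Println(audio.CalculateRMSLevel(sine))  // ~70
	fmt.Println(audio.CalculatePeakLevel(sine)) // 100
	fmt.Println(audio.LevelToBar(70, 10))       // "███████░░░"
}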

pkg/audio/playback.go (new file, +286)

@@ -0,0 +1,286 @@
package audio
import (
"encoding/binary"
"fmt"
"sync"
"time"
"unsafe"
"github.com/go-ole/go-ole"
"github.com/moutend/go-wca/pkg/wca"
)
// Player handles WASAPI audio playback
type Player struct {
client *wca.IAudioClient
renderClient *wca.IAudioRenderClient
waveFormat *wca.WAVEFORMATEX
bufferSize uint32
volume float32
muted bool
mu sync.Mutex
running bool
stopChan chan struct{}
// Audio buffer - accumulates incoming audio
audioBuffer []int16
bufferMu sync.Mutex
// Frame queue (960 samples = 20ms at 48kHz)
frameQueue chan []int16
}
const frameSamples = 960 // 20ms at 48kHz
// NewPlayer creates a new WASAPI audio player
func NewPlayer() (*Player, error) {
// Initialize COM (the error is ignored: CoInitializeEx returns S_FALSE
// when COM is already initialized on this thread)
ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED)
// Get default audio endpoint
var deviceEnumerator *wca.IMMDeviceEnumerator
if err := wca.CoCreateInstance(
wca.CLSID_MMDeviceEnumerator,
0,
wca.CLSCTX_ALL,
wca.IID_IMMDeviceEnumerator,
&deviceEnumerator,
); err != nil {
return nil, fmt.Errorf("failed to create device enumerator: %w", err)
}
defer deviceEnumerator.Release()
var device *wca.IMMDevice
if err := deviceEnumerator.GetDefaultAudioEndpoint(wca.ERender, wca.EConsole, &device); err != nil {
return nil, fmt.Errorf("failed to get default render device: %w", err)
}
defer device.Release()
// Activate audio client
var audioClient *wca.IAudioClient
if err := device.Activate(wca.IID_IAudioClient, wca.CLSCTX_ALL, nil, &audioClient); err != nil {
return nil, fmt.Errorf("failed to activate audio client: %w", err)
}
// Set up format for 48kHz mono 16-bit (TeamSpeak format)
waveFormat := &wca.WAVEFORMATEX{
WFormatTag: wca.WAVE_FORMAT_PCM,
NChannels: 1,
NSamplesPerSec: 48000,
WBitsPerSample: 16,
NBlockAlign: 2,
NAvgBytesPerSec: 96000,
CbSize: 0,
}
// Initialize in shared mode - 100ms buffer
duration := wca.REFERENCE_TIME(100 * 10000) // 100ms in 100-nanosecond units
if err := audioClient.Initialize(
wca.AUDCLNT_SHAREMODE_SHARED,
wca.AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM|wca.AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY,
duration,
0,
waveFormat,
nil,
); err != nil {
audioClient.Release()
return nil, fmt.Errorf("failed to initialize audio client: %w", err)
}
// Get buffer size
var bufferSize uint32
if err := audioClient.GetBufferSize(&bufferSize); err != nil {
audioClient.Release()
return nil, fmt.Errorf("failed to get buffer size: %w", err)
}
// Get render client
var renderClient *wca.IAudioRenderClient
if err := audioClient.GetService(wca.IID_IAudioRenderClient, &renderClient); err != nil {
audioClient.Release()
return nil, fmt.Errorf("failed to get render client: %w", err)
}
return &Player{
client: audioClient,
renderClient: renderClient,
waveFormat: waveFormat,
bufferSize: bufferSize,
volume: 1.0,
muted: false,
stopChan: make(chan struct{}),
audioBuffer: make([]int16, 0, frameSamples*50), // ~1 second buffer
frameQueue: make(chan []int16, 100), // ~2 seconds of frames
}, nil
}
// Start begins audio playback
func (p *Player) Start() error {
p.mu.Lock()
if p.running {
p.mu.Unlock()
return nil
}
p.running = true
p.stopChan = make(chan struct{}) // Recreate channel so the player can be restarted after Stop
p.mu.Unlock()
if err := p.client.Start(); err != nil {
p.mu.Lock()
p.running = false // Roll back so a later Start can retry
p.mu.Unlock()
return fmt.Errorf("failed to start audio client: %w", err)
}
// Playback loop writes frames from queue to WASAPI
go p.playbackLoop()
return nil
}
// Stop stops audio playback
func (p *Player) Stop() {
p.mu.Lock()
if !p.running {
p.mu.Unlock()
return
}
p.running = false
p.mu.Unlock()
close(p.stopChan)
p.client.Stop()
}
// Close releases all resources
func (p *Player) Close() {
p.Stop()
if p.renderClient != nil {
p.renderClient.Release()
}
if p.client != nil {
p.client.Release()
}
ole.CoUninitialize()
}
// PlayPCM queues PCM audio for playback
// Accumulates samples and queues complete 960-sample frames
func (p *Player) PlayPCM(samples []int16) {
// Snapshot mute/volume under the lock to avoid racing SetMuted/SetVolume
p.mu.Lock()
muted := p.muted
volume := p.volume
p.mu.Unlock()
if muted {
return
}
// Apply volume
adjusted := samples
if volume != 1.0 {
adjusted = make([]int16, len(samples))
for i, s := range samples {
adjusted[i] = int16(float32(s) * volume)
}
}
p.bufferMu.Lock()
p.audioBuffer = append(p.audioBuffer, adjusted...)
// Queue complete 960-sample frames
for len(p.audioBuffer) >= frameSamples {
frame := make([]int16, frameSamples)
copy(frame, p.audioBuffer[:frameSamples])
p.audioBuffer = p.audioBuffer[frameSamples:]
select {
case p.frameQueue <- frame:
default:
// Queue full, drop oldest frame
select {
case <-p.frameQueue:
default:
}
p.frameQueue <- frame
}
}
p.bufferMu.Unlock()
}
// SetVolume sets playback volume (0.0 to 1.0)
func (p *Player) SetVolume(vol float32) {
if vol < 0 {
vol = 0
}
if vol > 1.0 {
vol = 1.0
}
p.mu.Lock()
p.volume = vol
p.mu.Unlock()
}
// GetVolume returns current volume (0.0 to 1.0)
func (p *Player) GetVolume() float32 {
p.mu.Lock()
defer p.mu.Unlock()
return p.volume
}
// SetMuted sets mute state
func (p *Player) SetMuted(muted bool) {
p.mu.Lock()
p.muted = muted
p.mu.Unlock()
}
// IsMuted returns mute state
func (p *Player) IsMuted() bool {
p.mu.Lock()
defer p.mu.Unlock()
return p.muted
}
func (p *Player) playbackLoop() {
// Use 20ms ticker matching TeamSpeak frame rate
ticker := time.NewTicker(20 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-p.stopChan:
return
case <-ticker.C:
p.writeFrame()
}
}
}
func (p *Player) writeFrame() {
// Get current padding (samples already in buffer)
var padding uint32
if err := p.client.GetCurrentPadding(&padding); err != nil {
return
}
available := p.bufferSize - padding
if available < frameSamples {
return // Not enough space for a full frame
}
// Try to get a frame from the queue
select {
case frame := <-p.frameQueue:
var buffer *byte
if err := p.renderClient.GetBuffer(frameSamples, &buffer); err != nil {
return
}
// Write frame to WASAPI buffer
bufSlice := unsafe.Slice(buffer, frameSamples*2)
for i := 0; i < frameSamples; i++ {
binary.LittleEndian.PutUint16(bufSlice[i*2:], uint16(frame[i]))
}
p.renderClient.ReleaseBuffer(frameSamples, 0)
default:
// No audio available - optionally write silence
// (skip for now to avoid crackling)
}
}
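
Putting capture and playback together, a local mic-to-speaker loopback can be sketched as below (assumed usage, not part of the commit; the import path is hypothetical, and since WASAPI/COM is apartment-threaded, real code would likely also pin the goroutine with runtime.LockOSThread()).

package main

import (
	"runtime"
	"time"

	"example.com/tsclient/pkg/audio" // hypothetical module path
)

func main() {
	// COM apartments are per-thread; keep initialization on one OS thread
	runtime.LockOSThread()

	mic, err := audio.NewCapturer()
	if err != nil {
		panic(err)
	}
	defer mic.Close()

	spk, err := audio.NewPlayer()
	if err != nil {
		panic(err)
	}
	defer spk.Close()

	spk.SetVolume(0.8)
	mic.SetCallback(spk.PlayPCM) // both sides exchange 960-sample (20ms) frames

	if err := spk.Start(); err != nil {
		panic(err)
	}
	if err := mic.Start(); err != nil {
		panic(err)
	}

	time.Sleep(10 * time.Second) // loop mic to speakers for ten seconds
	mic.Stop()
	spk.Stop()
}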

@@ -269,6 +269,14 @@ func (c *Client) IsConnected() bool {
return c.connected
}
// GetPing returns the current RTT in milliseconds
func (c *Client) GetPing() float64 {
if c.internal == nil {
return 0
}
return c.internal.PingRTT
}
// handleInternalEvent processes events from the internal client
func (c *Client) handleInternalEvent(eventType string, data map[string]any) {
switch eventType {