Implementing Audio Player on a Website
The native <audio> element renders differently in every browser, can't be styled, and gives no hooks for analytics or advanced UX. A custom player is built on the Web Audio API or on ready-made libraries.
Approach Selection
Three complexity levels:
Level 1 — styled audio — hide the native element and draw custom controls over it. Minimal code, but you lose the waveform progress bar.
Level 2 — library — Wavesurfer.js, Howler.js, Plyr. Wavesurfer draws waveform, Howler provides low-level control with sprite support, Plyr — beautiful UI for simple cases.
Level 3 — Web Audio API directly — full control: equalizer, frequency analyzer, effects. Used for music services, podcast platforms.
Wavesurfer.js: Player with Waveform Visualization
npm install wavesurfer.js
import WaveSurfer from 'wavesurfer.js'
import { useEffect, useRef, useState } from 'react'
/** Props for {@link WaveAudioPlayer}. */
interface AudioPlayerProps {
  /** URL of the audio file to load and render as a waveform. */
  url: string
  /** Track title shown in the player header. */
  title: string
  /** Color of the unplayed portion of the waveform (CSS color). */
  waveColor?: string
  /** Color of the played portion and the cursor (CSS color). */
  progressColor?: string
}
/**
 * Audio player with a waveform progress bar rendered by WaveSurfer.
 *
 * The WaveSurfer instance is recreated whenever `url` or the colors change;
 * all UI state (playing / time / loading) is reset at that point so a stale
 * "playing" flag from the previous track cannot survive the swap.
 */
export function WaveAudioPlayer({
  url, title,
  waveColor = '#94a3b8',
  progressColor = '#6366f1',
}: AudioPlayerProps) {
  const containerRef = useRef<HTMLDivElement>(null)
  const wsRef = useRef<WaveSurfer>()
  const [playing, setPlaying] = useState(false)
  const [currentTime, setCurrentTime] = useState(0)
  const [duration, setDuration] = useState(0)
  const [loading, setLoading] = useState(true)

  useEffect(() => {
    if (!containerRef.current) return

    // Recreating the instance: reset UI state so values from the previous
    // track don't leak into the new one.
    setPlaying(false)
    setCurrentTime(0)
    setDuration(0)
    setLoading(true)

    const ws = WaveSurfer.create({
      container: containerRef.current,
      waveColor,
      progressColor,
      height: 64,
      barWidth: 2,
      barGap: 1,
      barRadius: 2,
      cursorWidth: 1,
      cursorColor: progressColor,
      normalize: true,
      backend: 'WebAudio',
    })
    wsRef.current = ws

    ws.load(url)
    ws.on('ready', () => {
      setDuration(ws.getDuration())
      setLoading(false)
    })
    ws.on('audioprocess', () => {
      setCurrentTime(ws.getCurrentTime())
    })
    // Derive `playing` from the instance's own events instead of toggling it
    // optimistically in the click handler — the two can't drift apart.
    ws.on('play', () => setPlaying(true))
    ws.on('pause', () => setPlaying(false))
    ws.on('finish', () => setPlaying(false))
    ws.on('error', (err) => {
      console.error('WaveSurfer error:', err)
      setLoading(false)
    })

    return () => ws.destroy()
  }, [url, waveColor, progressColor])

  // State updates arrive via the 'play'/'pause' listeners above.
  const togglePlay = () => {
    wsRef.current?.playPause()
  }

  /** Format seconds as m:ss (e.g. 65 → "1:05"). */
  const formatTime = (sec: number) => {
    const m = Math.floor(sec / 60)
    const s = Math.floor(sec % 60)
    return `${m}:${s.toString().padStart(2, '0')}`
  }

  return (
    <div className="audio-player">
      <div className="audio-player__meta">
        <span>{title}</span>
        <span>{formatTime(currentTime)} / {formatTime(duration)}</span>
      </div>
      {loading && <div className="audio-player__loading">Loading...</div>}
      <div ref={containerRef} className="audio-player__wave" />
      <button onClick={togglePlay} disabled={loading} aria-label={playing ? 'Pause' : 'Play'}>
        {playing ? '⏸' : '▶'}
      </button>
    </div>
  )
}
Pre-generating Peaks on Server
Downloading full audio file to draw waveform is expensive. Generate peaks once on upload:
// Laravel: generate peaks via audiowaveform (C++ utility)
// Install: apt-get install audiowaveform
use Illuminate\Support\Facades\Process;
class AudioService
{
    /**
     * Generate waveform peaks for an audio file via the audiowaveform CLI.
     *
     * Writes a temporary JSON file, parses it, and always removes it —
     * including on the failure path (the original leaked the temp file
     * when the process failed).
     *
     * @param string $audioPath absolute path to the source audio file
     * @return array flat array of peak values ('data' key of audiowaveform output)
     * @throws \RuntimeException when audiowaveform fails or output is unreadable
     */
    public function generatePeaks(string $audioPath): array
    {
        $jsonPath = sys_get_temp_dir() . '/' . uniqid('peaks_', true) . '.json';

        try {
            $result = Process::run([
                'audiowaveform',
                '-i', $audioPath,
                '-o', $jsonPath,
                '--bits', '8',
                '--pixels-per-second', '20',
            ]);

            if ($result->failed()) {
                throw new \RuntimeException('audiowaveform failed: ' . $result->errorOutput());
            }

            $json = file_get_contents($jsonPath);
            if ($json === false) {
                // Don't let json_decode(false) silently collapse into []
                throw new \RuntimeException('Unable to read peaks output: ' . $jsonPath);
            }

            $data = json_decode($json, true);

            return $data['data'] ?? [];
        } finally {
            // Cleanup runs on both success and every throw above
            if (file_exists($jsonPath)) {
                unlink($jsonPath);
            }
        }
    }
}
// Pass peaks via props — no audio download for drawing
wsRef.current = WaveSurfer.create({
  container: containerRef.current,
  peaks: track.peaks,       // pre-generated array from DB — waveform draws instantly
  duration: track.duration, // needed so the timeline scales without the audio file
  // Audio loads only on Play click
})
// `once`, not `on`: with `on` every interaction (each seek click) would
// re-issue load(url) and re-download the file.
wsRef.current.once('interaction', () => {
  wsRef.current!.load(url)
})
Howler.js: Sprites and Multi-track Management
npm install howler @types/howler
import { Howl, Howler } from 'howler'
// Global volume control — applies to every Howl instance (0.0–1.0)
Howler.volume(0.8)

// Audio sprite — one file, multiple sounds (for games, UI sounds).
// Each entry is [offsetMs, durationMs] into the combined file;
// webm listed first, mp3 as fallback for browsers without webm support.
const sprite = new Howl({
  src: ['/sounds/ui-sounds.webm', '/sounds/ui-sounds.mp3'],
  sprite: {
    click: [0, 150],
    success: [300, 800],
    error: [1200, 500],
    notification: [1800, 1200],
  },
})
sprite.play('success')
// Player with queue
/**
 * Sequential playlist player built on Howler.
 * Keeps a single active Howl; advancing tracks releases the previous one.
 */
class AudioQueue {
  private queue: string[] = []
  private current: Howl | null = null
  private currentIndex = 0

  constructor(tracks: string[]) {
    this.queue = tracks
  }

  /** Play the track at `index` (defaults to the current track). */
  play(index = this.currentIndex) {
    // Guard out-of-range indices — otherwise we'd build a Howl with an
    // undefined src and fire onloaderror.
    if (index < 0 || index >= this.queue.length) return
    // unload(), not stop(): stop() keeps the decoded buffer / HTML5 audio
    // node alive, which leaks memory while skipping through a long queue.
    this.current?.unload()
    this.currentIndex = index
    this.current = new Howl({
      src: [this.queue[index]],
      html5: true, // stream large files instead of decoding them fully in memory
      onend: () => this.next(),
      onloaderror: (id, err) => console.error('Load error:', err),
    })
    this.current.play()
  }

  /** Advance to the next track, if any. */
  next() {
    if (this.currentIndex < this.queue.length - 1) {
      this.play(this.currentIndex + 1)
    }
  }

  /** Go back to the previous track, if any. */
  prev() {
    if (this.currentIndex > 0) {
      this.play(this.currentIndex - 1)
    }
  }
}
Frequency Visualizer via Web Audio API
/**
 * Draws a live frequency-bar visualization of an <audio> element onto a canvas.
 *
 * NOTE: createMediaElementSource may be called only once per element —
 * calling this function twice with the same element throws.
 *
 * @returns cleanup function that stops the render loop, disconnects the
 *   audio graph, and closes the AudioContext. (Previous version returned
 *   nothing and the rAF loop ran forever — callers ignoring the return
 *   value keep the old behavior.)
 */
function createFrequencyVisualizer(audioElement: HTMLAudioElement, canvas: HTMLCanvasElement) {
  const ctx = new AudioContext()
  const source = ctx.createMediaElementSource(audioElement)
  const analyser = ctx.createAnalyser()
  analyser.fftSize = 256
  source.connect(analyser)
  analyser.connect(ctx.destination) // keep the sound audible, not just analyzed
  const bufferLength = analyser.frequencyBinCount // fftSize / 2 = 128 bars
  const dataArray = new Uint8Array(bufferLength)
  const canvasCtx = canvas.getContext('2d')!

  // Autoplay policy: the context starts 'suspended' until a user gesture,
  // which would leave the visualizer rendering flat bars over audible sound.
  const resume = () => {
    if (ctx.state === 'suspended') void ctx.resume()
  }
  audioElement.addEventListener('play', resume)

  let rafId = 0
  function draw() {
    rafId = requestAnimationFrame(draw)
    analyser.getByteFrequencyData(dataArray)
    canvasCtx.fillStyle = '#0f172a'
    canvasCtx.fillRect(0, 0, canvas.width, canvas.height)
    const barWidth = canvas.width / bufferLength * 2.5
    let x = 0
    for (let i = 0; i < bufferLength; i++) {
      const barHeight = (dataArray[i] / 255) * canvas.height
      const hue = (i / bufferLength) * 240 + 180 // low→high freq sweeps the hue wheel
      canvasCtx.fillStyle = `hsl(${hue}, 70%, 60%)`
      canvasCtx.fillRect(x, canvas.height - barHeight, barWidth, barHeight)
      x += barWidth + 1
    }
  }
  draw()

  return () => {
    cancelAnimationFrame(rafId)
    audioElement.removeEventListener('play', resume)
    source.disconnect()
    analyser.disconnect()
    void ctx.close()
  }
}
Keyboard Shortcuts and Accessibility
/**
 * Global keyboard shortcuts for the player:
 * Space — play/pause, ←/→ — seek ±5s, ↑/↓ — volume ±0.1.
 *
 * @returns teardown function that removes the document listener — the
 *   original attached it permanently, which leaks across SPA navigations.
 *   (Callers that ignore the return value keep the old behavior.)
 */
function setupKeyboardControls(player: WaveSurfer) {
  const onKeyDown = (e: KeyboardEvent) => {
    // Don't hijack typing: skip form fields and contenteditable regions
    const target = e.target as HTMLElement | null
    if (!target) return
    if (['INPUT', 'TEXTAREA', 'SELECT'].includes(target.tagName) || target.isContentEditable) return

    switch (e.code) {
      case 'Space':
        e.preventDefault() // stop the page from scrolling
        player.playPause()
        break
      case 'ArrowLeft':
        player.skip(-5)
        break
      case 'ArrowRight':
        player.skip(5)
        break
      case 'ArrowUp':
        player.setVolume(Math.min(1, player.getVolume() + 0.1))
        break
      case 'ArrowDown':
        player.setVolume(Math.max(0, player.getVolume() - 0.1))
        break
    }
  }

  document.addEventListener('keydown', onKeyDown)
  return () => document.removeEventListener('keydown', onKeyDown)
}
MediaSession API: OS and Bluetooth Integration
/**
 * Registers track metadata and transport handlers with the OS media session
 * (lock screen, headphone buttons, system media overlay).
 * No-op in browsers without the Media Session API.
 */
function setupMediaSession(track: { title: string; artist: string; artwork: string }) {
  if (!('mediaSession' in navigator)) return

  navigator.mediaSession.metadata = new MediaMetadata({
    title: track.title,
    artist: track.artist,
    artwork: [
      { src: track.artwork, sizes: '512x512', type: 'image/webp' },
    ],
  })

  navigator.mediaSession.setActionHandler('play', () => wavesurfer.play())
  navigator.mediaSession.setActionHandler('pause', () => wavesurfer.pause())
  navigator.mediaSession.setActionHandler('previoustrack', () => queue.prev())
  navigator.mediaSession.setActionHandler('nexttrack', () => queue.next())
  navigator.mediaSession.setActionHandler('seekto', (details) => {
    const duration = wavesurfer.getDuration()
    // Guard: before the audio is loaded duration is 0, and dividing by it
    // would call seekTo(NaN/Infinity)
    if (details.seekTime !== undefined && duration > 0) {
      wavesurfer.seekTo(details.seekTime / duration)
    }
  })
}
Tracks can now be controlled by headphone buttons, the iOS/Android lock screen, and the Windows system media player.
Timeline
Simple Plyr player for one track — half day. Wavesurfer with visualization, peak pre-generation, MediaSession API — 2–3 days. Full music player with queue, playlists, visualizer — 1–1.5 weeks.







