-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathAudioSnapshotTestingTests.swift
More file actions
258 lines (234 loc) · 8.32 KB
/
AudioSnapshotTestingTests.swift
File metadata and controls
258 lines (234 loc) · 8.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
import Testing
import AVFAudio
@testable import AudioSnapshotTesting
/// Snapshots the default waveform rendering for each basic oscillator / noise
/// fixture, one snapshot per bundled WAV file.
@Test(
    .audioSnapshot(record: false, strategy: .waveform(width: 3000, height: 800)),
    arguments: ["sine", "triangle", "square", "sawtooth", "brown", "pink", "white"]
)
func fileWaveform(wave: String) async throws {
    let buffer = try AVAudioPCMBuffer.read(wave: wave)
    await assertAudioSnapshot(of: buffer, named: "fileWaveform.\(wave)")
}
/// Snapshots the metronome fixture using the individual-lines waveform strategy.
@Test(
    .audioSnapshot(record: false, strategy: .waveform(width: 800, height: 400, strategy: .individualLines))
)
func fileWaveformMetronome() async throws {
    let metronome = try AVAudioPCMBuffer.read(wave: "metronome")
    await assertAudioSnapshot(of: metronome, named: "fileWaveformMetronome.metronome")
}
/// Snapshots a stereo fixture with `mono: false`, so both channels are rendered
/// instead of being down-mixed.
@Test(
    .audioSnapshot(record: false, strategy: .waveform(width: 4000, height: 1000, strategy: .individualLines, mono: false))
)
func stereoFileWaveform() async throws {
    let stereo = try AVAudioPCMBuffer.read(wave: "left-right-metronome")
    await assertAudioSnapshot(of: stereo, named: "stereoFileWaveform")
}
/// Snapshots two buffers passed as an array, which overlays both waveforms in a
/// single image (square drawn over sine).
@Test(
    .audioSnapshot(record: false, strategy: .waveform(width: 4000, height: 1000))
)
func fileWaveformOverlay() async throws {
    let sine = try AVAudioPCMBuffer.read(wave: "sine")
    let square = try AVAudioPCMBuffer.read(wave: "square")
    await assertAudioSnapshot(of: [sine, square], named: "fileWaveformOverlay.square-over-sine")
}
/// Snapshots spectra of tones that are exactly periodic within the buffer.
///
/// The trait from the `@Test` attribute is overridden through the task-local
/// configuration so the analysis uses a rectangular (all-ones) window spanning
/// the whole buffer, avoiding leakage for these perfectly periodic fixtures.
@available(iOS 16, macOS 13, *)
@Test(
    .audioSnapshot(record: false),
    arguments: ["1hz@32768", "1hz2hz@32768", "2hz@32768"]
)
func perfectSpectrum(wave: String) async throws {
    let buffer = try AVAudioPCMBuffer.read(wave: wave)
    let rectangularWindow = [Float](repeating: 1, count: Int(buffer.frameLength))
    let trait = AudioSnapshotTrait(
        record: false,
        strategy: .spectrum(width: 500, height: 200, window: rectangularWindow)
    )
    await AudioSnapshotConfiguration.$current.withValue(trait) {
        await assertAudioSnapshot(of: buffer, named: "perfectSpectrum.\(wave)")
    }
}
/// Snapshots spectra using the strategy's default window for a mix of tones and
/// noise fixtures.
@available(iOS 16, macOS 13, *)
@Test(
    .audioSnapshot(record: false, strategy: .spectrum(width: 1500, height: 400)),
    arguments: ["1hz@44100", "white", "brown", "pink", "square", "triangle", "sawtooth"]
)
func windowedSpectrum(wave: String) async throws {
    await assertAudioSnapshot(
        of: try AVAudioPCMBuffer.read(wave: wave),
        named: "windowedSpectrum.\(wave)"
    )
}
/// Snapshots the spectrum of a synthesized signal made of 1024 sine components
/// whose frequency and amplitude both increase linearly, analysed with a
/// rectangular window covering all 32768 frames.
@available(iOS 16, macOS 13, *)
@Test(
    .audioSnapshot(record: false, strategy: .spectrum(width: 1500, height: 400, window: .init(repeating: 1, count: 32768)))
)
func spectrumSynthesised() async throws {
    let componentCount = 1024
    let pairs = (0..<componentCount).map { (Float($0), Float($0) / Float(componentCount)) }
    let buffer = createBuffer(from: synthesizeSignal(frequencyAmplitudePairs: pairs, count: 32768))
    await assertAudioSnapshot(of: buffer, named: "spectrumSynthesised.synthesized")
}
/// Snapshots a spectrogram whose input ramps amplitude across 1024 frequency
/// bins, exercising the colour mapping over the full amplitude range.
@available(iOS 16, macOS 13, *)
@Test(
    "Generates color spectrum spectrogram",
    .audioSnapshot(
        record: false,
        strategy: .spectrogram(hopSize: 128, frequencyCount: 1024, window: [Float](repeating: 1, count: 2048), amplitudeScale: .linear, imageWidth: 200)
    )
)
func spectrogramColors() async throws {
    let binCount = 1024
    let frame = synthesizeSignal(
        frequencyAmplitudePairs: (0..<binCount).map { (Float($0), Float($0) / Float(binCount)) },
        count: 2048
    )
    // Repeat the 2048-sample frame four times so multiple hops fit in the buffer.
    let samples = Array(repeating: frame, count: 4).flatMap { $0 }
    await assertAudioSnapshot(of: createBuffer(from: samples), named: "spectrogramColors")
}
/// Snapshots spectrograms for a broad set of tone, sweep, and effect fixtures.
///
/// NOTE(review): the function name says "Hop256" but the trait uses
/// `hopSize: 4096`; renaming would invalidate the recorded
/// "spectrogramHop256.*" snapshots, so the mismatch is only flagged here —
/// confirm which value is intended.
@available(iOS 16, macOS 13, *)
@Test(
    .audioSnapshot(record: false, strategy: .spectrogram(hopSize: 4096, frequencyCount: 2048)),
    arguments: [
        "500hz",
        "2000hz",
        "5000hz",
        "15000hz",
        "20000hz",
        "chirp",
        "440-880-1320hz",
        "beating",
        "fade-in-out",
        "ultra-high-tones",
        "high-sweep",
        "spring"
    ]
)
func spectrogramHop256(wave: String) async throws {
    await assertAudioSnapshot(
        of: try AVAudioPCMBuffer.read(wave: wave),
        named: "spectrogramHop256.\(wave)"
    )
}
/// Snapshots the waveform of a synthesized two-tone signal
/// (440 Hz at amplitude 0.5 plus 880 Hz at amplitude 0.3).
@Test(
    "Audio snapshot of synthesized signal",
    .audioSnapshot(record: false, strategy: .waveform(width: 1000, height: 300))
)
func audioSnapshotSynthesized() async throws {
    let twoTone = synthesizeSignal(
        frequencyAmplitudePairs: [(440, 0.5), (880, 0.3)],
        count: 4410
    )
    await assertAudioSnapshot(of: createBuffer(from: twoTone), named: "audioSnapshotSynthesized.440-880hz")
}
/// Snapshots a four-channel buffer where each channel carries a different sine
/// tone, so per-channel rendering can be compared side by side.
@available(iOS 16, macOS 13, *)
@Test(
    "Multi-channel (4ch) audio comparison",
    .audioSnapshot(record: false, strategy: .spectrogram(hopSize: 4096, frequencyCount: 2048))
)
func multiChannelComparison() async throws {
    let sampleCount = 44100
    var channels: [[Float]] = []
    for frequency: Float in [440, 880, 1320, 1760] {
        channels.append(synthesizeSignal(frequencyAmplitudePairs: [(frequency, 0.5)], count: sampleCount))
    }
    let buffer = createBuffer(channels: channels, sampleRate: 44100)
    await assertAudioSnapshot(of: buffer, named: "multiChannelComparison.4ch")
}
/// Verifies that a deterministic synthesized buffer round-trips through the
/// checksum snapshot format.
@Test(
    "Checksum snapshot records and verifies a deterministic buffer",
    .audioSnapshot(record: false, format: .checksum)
)
func checksumRoundTrip() async throws {
    let tone = synthesizeSignal(frequencyAmplitudePairs: [(440, 0.5)], count: 4410)
    await assertAudioSnapshot(of: createBuffer(from: tone), named: "checksumRoundTrip.440hz")
}
/// Verifies that passing several buffers to a checksum snapshot produces
/// indexed snapshot names rather than a single combined one.
@Test(
    "Checksum snapshot with multiple buffers uses indexed naming",
    .audioSnapshot(record: false, format: .checksum)
)
func checksumMultiBuffer() async throws {
    let components: [(f: Float, a: Float)] = [(440, 0.5), (880, 0.3)]
    let buffers = components.map { component in
        createBuffer(from: synthesizeSignal(frequencyAmplitudePairs: [component], count: 4410))
    }
    await assertAudioSnapshot(of: buffers, named: "checksumMultiBuffer")
}
/// Convenience overload: wraps a single sample array as a one-channel buffer.
private func createBuffer(from samples: [Float], sampleRate: Double = 32768) -> AVAudioPCMBuffer {
    return createBuffer(channels: [samples], sampleRate: sampleRate)
}
/// Builds a deinterleaved float PCM buffer from per-channel sample arrays.
///
/// - Parameters:
///   - channels: One sample array per channel. Must be non-empty and all the
///     same length — the buffer's capacity is sized from the first channel, so
///     a longer later channel would otherwise write past the allocation via
///     the raw `floatChannelData` pointers.
///   - sampleRate: Sample rate stamped on the buffer's format.
/// - Returns: A buffer whose `frameLength` equals the per-channel sample count.
private func createBuffer(channels: [[Float]], sampleRate: Double = 32768) -> AVAudioPCMBuffer {
    precondition(!channels.isEmpty, "createBuffer requires at least one channel")
    let frameCount = channels[0].count
    precondition(
        channels.allSatisfy { $0.count == frameCount },
        "all channels must have the same sample count"
    )
    let channelCount = channels.count
    let format: AVAudioFormat
    if channelCount <= 2 {
        format = AVAudioFormat(standardFormatWithSampleRate: sampleRate, channels: AVAudioChannelCount(channelCount))!
    } else {
        // Mono/stereo layouts are implicit; wider buffers need an explicit discrete layout.
        let layout = AVAudioChannelLayout(layoutTag: kAudioChannelLayoutTag_DiscreteInOrder | UInt32(channelCount))!
        format = AVAudioFormat(standardFormatWithSampleRate: sampleRate, channelLayout: layout)
    }
    let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(frameCount))!
    for (channelIndex, samples) in channels.enumerated() {
        // Standard format is deinterleaved float, so each channel has its own plane.
        let channelData = buffer.floatChannelData![channelIndex]
        for (frameIndex, sample) in samples.enumerated() {
            channelData[frameIndex] = sample
        }
    }
    buffer.frameLength = AVAudioFrameCount(frameCount)
    return buffer
}
private extension AVAudioPCMBuffer {
    /// Loads an entire bundled WAV fixture ("<wave>.wav") into a PCM buffer.
    ///
    /// - Parameter wave: Resource name of the fixture, without the ".wav" extension.
    /// - Throws: A Swift Testing requirement failure when the resource is
    ///   missing or the buffer cannot be allocated, or any `AVAudioFile` read error.
    static func read(wave: String) throws -> AVAudioPCMBuffer {
        // `#require` turns a missing fixture into a test failure instead of a
        // crash — consistent with the buffer-allocation check below.
        let url = try #require(
            Bundle.module.url(forResource: wave, withExtension: "wav"),
            "missing test resource \(wave).wav"
        )
        let file = try AVAudioFile(forReading: url)
        let buffer = try #require(AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: AVAudioFrameCount(file.length)))
        try file.read(into: buffer)
        return buffer
    }
}
/// Synthesize a signal composed of multiple sine waves given frequency-amplitude pairs.
///
/// The phase is driven by the index normalized to [0, 1), so a component with
/// frequency `f` completes `f` full cycles over the `count` samples.
///
/// - Parameters:
///   - frequencyAmplitudePairs: Sine components as (cycles-per-buffer, amplitude).
///   - count: Number of samples to generate; 0 yields an empty array.
/// - Returns: The sample-wise sum of all components.
private func synthesizeSignal(
    frequencyAmplitudePairs: [(f: Float, a: Float)],
    count: Int
) -> [Float] {
    let tau: Float = .pi * 2
    return (0 ..< count).map { index in
        // Hoisted out of the reduce: the normalized phase depends only on the index,
        // not on the component, so compute it once per sample.
        let normalizedIndex = Float(index) / Float(count)
        return frequencyAmplitudePairs.reduce(0) { accumulator, pair in
            accumulator + sin(normalizedIndex * pair.f * tau) * pair.a
        }
    }
}