Cleaned up the code. Added OBS & 7tv WebSocket support. Added dependency injection. The app now loads its configuration from a YAML file.
Chat/Speech/AudioPlaybackEngine.cs (new file, 97 lines)
@@ -0,0 +1,97 @@
using NAudio.Wave;
using NAudio.Extras;
using NAudio.Wave.SampleProviders;

// Singleton playback engine: one output device driving a mixer that accepts
// fire-and-forget inputs at a fixed sample rate and channel count.
public class AudioPlaybackEngine : IDisposable
{
    public static readonly AudioPlaybackEngine Instance = new AudioPlaybackEngine(44100, 2);

    private readonly IWavePlayer outputDevice;
    private readonly MixingSampleProvider mixer;

    public int SampleRate { get; }

    private AudioPlaybackEngine(int sampleRate = 44100, int channelCount = 2)
    {
        SampleRate = sampleRate;
        outputDevice = new WaveOutEvent();

        mixer = new MixingSampleProvider(WaveFormat.CreateIeeeFloatWaveFormat(sampleRate, channelCount));
        mixer.ReadFully = true; // keep emitting silence so the device never stops

        outputDevice.Init(mixer);
        outputDevice.Play();
    }

    private ISampleProvider ConvertToRightChannelCount(ISampleProvider? input)
    {
        if (input is null)
            throw new ArgumentNullException(nameof(input));

        if (input.WaveFormat.Channels == mixer.WaveFormat.Channels)
        {
            return input;
        }
        if (input.WaveFormat.Channels == 1 && mixer.WaveFormat.Channels == 2)
        {
            return new MonoToStereoSampleProvider(input);
        }
        throw new NotImplementedException("This channel count conversion has not been implemented yet.");
    }

    public void PlaySound(string fileName)
    {
        // AutoDisposeFileReader disposes the file once it has been fully read.
        var input = new AudioFileReader(fileName);
        AddMixerInput(new WdlResamplingSampleProvider(ConvertToRightChannelCount(new AutoDisposeFileReader(input)), SampleRate));
    }

    public void PlaySound(NetworkWavSound sound)
    {
        AddMixerInput(new CachedWavProvider(sound));
    }

    // Converts a raw wave provider into a sample provider the mixer can accept.
    public ISampleProvider ConvertSound(IWaveProvider provider)
    {
        ISampleProvider? converted = null;
        if (provider.WaveFormat.Encoding == WaveFormatEncoding.Pcm)
        {
            if (provider.WaveFormat.BitsPerSample == 8)
                converted = new Pcm8BitToSampleProvider(provider);
            else if (provider.WaveFormat.BitsPerSample == 16)
                converted = new Pcm16BitToSampleProvider(provider);
            else if (provider.WaveFormat.BitsPerSample == 24)
                converted = new Pcm24BitToSampleProvider(provider);
            else if (provider.WaveFormat.BitsPerSample == 32)
                converted = new Pcm32BitToSampleProvider(provider);
        }
        else if (provider.WaveFormat.Encoding == WaveFormatEncoding.IeeeFloat)
        {
            if (provider.WaveFormat.BitsPerSample == 64)
                converted = new WaveToSampleProvider64(provider);
            else
                converted = new WaveToSampleProvider(provider);
        }
        else
        {
            throw new ArgumentException("Unsupported source encoding while adding to mixer.");
        }
        return ConvertToRightChannelCount(converted);
    }

    public void AddMixerInput(ISampleProvider input)
    {
        mixer.AddMixerInput(input);
    }

    public void AddMixerInput(IWaveProvider input)
    {
        mixer.AddMixerInput(input);
    }

    public void RemoveMixerInput(ISampleProvider sound)
    {
        mixer.RemoveMixerInput(sound);
    }

    public void AddOnMixerInputEnded(EventHandler<SampleProviderEventArgs> e)
    {
        mixer.MixerInputEnded += e;
    }

    public void Dispose()
    {
        outputDevice.Dispose();
    }
}
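For reference, a minimal sketch of how this engine might be driven from calling code. The file name, handler body, and Main wrapper are illustrative only, and it assumes .NET implicit usings just as the file above does:

public static class PlaybackDemo
{
    public static void Main()
    {
        // The constructor is private; the singleton starts its output device
        // immediately and plays silence until inputs arrive.
        var engine = AudioPlaybackEngine.Instance;

        // Fire-and-forget playback; PlaySound resamples to the engine rate.
        engine.PlaySound("ding.wav"); // hypothetical file

        // Get notified whenever a mixer input finishes.
        engine.AddOnMixerInputEnded((sender, args) => Console.WriteLine("input ended"));

        Console.ReadLine(); // keep the process alive while audio plays
        engine.Dispose();
    }
}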
Chat/Speech/NetworkCachedSound.cs (new file, 42 lines)
@@ -0,0 +1,42 @@
using NAudio.Wave;

// Downloads and decodes an audio resource once, keeping the raw bytes in memory.
public class NetworkWavSound
{
    public byte[] AudioData { get; private set; }
    public WaveFormat WaveFormat { get; private set; }

    public NetworkWavSound(string uri)
    {
        using (var mfr = new MediaFoundationReader(uri))
        {
            WaveFormat = mfr.WaveFormat;

            byte[] buffer = new byte[4096];
            int read = 0;
            using (var ms = new MemoryStream())
            {
                while ((read = mfr.Read(buffer, 0, buffer.Length)) > 0)
                    ms.Write(buffer, 0, read);
                AudioData = ms.ToArray();
            }
        }
    }
}

// Replays a cached sound; each instance keeps its own read position
// over the shared byte buffer.
public class CachedWavProvider : IWaveProvider
{
    private readonly NetworkWavSound _sound;
    private readonly RawSourceWaveStream _stream;

    public WaveFormat WaveFormat { get => _sound.WaveFormat; }

    public CachedWavProvider(NetworkWavSound cachedSound)
    {
        _sound = cachedSound;
        _stream = new RawSourceWaveStream(new MemoryStream(_sound.AudioData), _sound.WaveFormat);
    }

    public int Read(byte[] buffer, int offset, int count)
    {
        return _stream.Read(buffer, offset, count);
    }
}
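A sketch of the intended caching pattern; the URL is a placeholder. Note that PlaySound(NetworkWavSound) adds the provider to the mixer directly, with no resampling step, so this only works when the decoded audio already matches the mixer format (44.1 kHz stereo here):

public static class CachedSoundDemo
{
    public static void Main()
    {
        // Download and decode once; MediaFoundationReader accepts a URL
        // and handles common containers (wav, mp3, ...).
        var sound = new NetworkWavSound("https://example.com/alert.wav"); // placeholder URL

        // Each CachedWavProvider owns its own position over the shared
        // byte buffer, so the same sound can play twice concurrently.
        AudioPlaybackEngine.Instance.PlaySound(sound);
        AudioPlaybackEngine.Instance.PlaySound(sound);

        Console.ReadLine(); // keep the process alive while audio plays
    }
}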
Chat/Speech/TTSPlayer.cs (new file, 92 lines)
@@ -0,0 +1,92 @@
using NAudio.Wave;

// Two-stage priority queue: messages enter the buffer when received,
// then move to the ready queue once their audio has been generated.
public class TTSPlayer {
    private PriorityQueue<TTSMessage, int> _messages; // ready to play
    private PriorityQueue<TTSMessage, int> _buffer;   // awaiting audio generation
    private Mutex _mutex;  // guards _messages
    private Mutex _mutex2; // guards _buffer

    public TTSPlayer() {
        _messages = new PriorityQueue<TTSMessage, int>();
        _buffer = new PriorityQueue<TTSMessage, int>();
        _mutex = new Mutex();
        _mutex2 = new Mutex();
    }

    public void Add(TTSMessage message) {
        try {
            _mutex2.WaitOne();
            _buffer.Enqueue(message, message.Priority);
        } finally {
            _mutex2.ReleaseMutex();
        }
    }

    public TTSMessage? ReceiveReady() {
        try {
            _mutex.WaitOne();
            if (_messages.TryDequeue(out TTSMessage? message, out int _)) {
                return message;
            }
            return null;
        } finally {
            _mutex.ReleaseMutex();
        }
    }

    public TTSMessage? ReceiveBuffer() {
        try {
            _mutex2.WaitOne();
            if (_buffer.TryDequeue(out TTSMessage? message, out int _)) {
                return message;
            }
            return null;
        } finally {
            _mutex2.ReleaseMutex();
        }
    }

    public void Ready(TTSMessage message) {
        try {
            _mutex.WaitOne();
            _messages.Enqueue(message, message.Priority);
        } finally {
            _mutex.ReleaseMutex();
        }
    }

    public void RemoveAll() {
        try {
            _mutex2.WaitOne();
            _buffer.Clear();
        } finally {
            _mutex2.ReleaseMutex();
        }

        try {
            _mutex.WaitOne();
            _messages.Clear();
        } finally {
            _mutex.ReleaseMutex();
        }
    }

    public bool IsEmpty() {
        // Only reflects the ready queue; buffered messages are not counted.
        return _messages.Count == 0;
    }
}

public class TTSMessage {
    public string? Voice { get; set; }
    public string? Channel { get; set; }
    public string? Username { get; set; }
    public string? Message { get; set; }
    public string? File { get; set; }
    public DateTime Timestamp { get; set; }
    public bool Moderator { get; set; }
    public bool Bot { get; set; }
    public IEnumerable<KeyValuePair<string, string>>? Badges { get; set; }
    public int Bits { get; set; }
    public int Priority { get; set; }
    public ISampleProvider? Audio { get; set; }
}
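The two-queue design implies a pipeline: chat handlers call Add(), a generation worker pulls messages with ReceiveBuffer(), synthesizes audio, and hands them back via Ready(), and a playback worker drains ReceiveReady(). A minimal single-threaded sketch of that flow (the synthesis call is hypothetical, and lower Priority values dequeue first):

public static class TTSPipelineDemo
{
    public static void Main()
    {
        var player = new TTSPlayer();

        // Producer: a chat handler enqueues an incoming message (values illustrative).
        player.Add(new TTSMessage
        {
            Username = "someviewer",
            Message = "hello chat",
            Priority = 0,
            Timestamp = DateTime.UtcNow,
        });

        // Generation worker: pull from the buffer, synthesize, mark ready.
        TTSMessage? pending = player.ReceiveBuffer();
        if (pending is not null)
        {
            // pending.Audio = Synthesize(pending); // hypothetical TTS call
            player.Ready(pending);
        }

        // Playback worker: drain ready messages in priority order.
        while (player.ReceiveReady() is TTSMessage next)
        {
            Console.WriteLine($"playing: {next.Message}");
        }
    }
}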