Reposted from: http://www.unitymanual.com/thread-42180-1-1.html
Hello everyone! This is 爱国者, and I'm glad to meet you again on the 游戏蛮牛 forum. In this installment I'll dig into one of the most talked-about new features of Unity 5: writing DSP plugins for the new Audio Mixer.
So what is a DSP plugin? First, what is DSP here? Put simply, it is the audio processing applied inside the Audio Mixer: each effect you add to a mixer group has its own set of parameters, and by combining effects with different settings you can produce a wide range of audio results. A DSP plugin, as the name suggests, is an extension of these built-in DSP effects. The effects that ship with Unity 5 cannot cover every need, so when they fall short we write our own plugin. Writing an Audio Mixer DSP plugin for Unity 5 takes two steps. First, download the Unity 5 audio plugin SDK, write the effect in C++ against it, and compile it into a native DLL. Second, import the compiled DLL into Unity 5 and write the plugin's GUI in C#; note that the C# class must inherit from the IAudioEffectPluginGUI base class. Those are the two steps. From the SDK we actually use two files, the headers AudioPluginInterface.h and AudioPluginUtil.h. There is also a third file, PluginList.h, which is used at the end to generate the DSP effect names; I will show how to write that later. For now, here are the two headers, which you can copy and paste as-is. The code follows. AudioPluginInterface.h header:
#pragma once
#define UNITY_AUDIO_PLUGIN_API_VERSION 0x010300
#ifndef UNITY_PREFIX_CONFIGURE_H
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
# define UNITY_WIN 1
#elif defined(__MACH__) || defined(__APPLE__)
# define UNITY_OSX 1
#elif defined(__ANDROID__)
# define UNITY_ANDROID 1
#elif defined(__linux__)
# define UNITY_LINUX 1
#elif defined(__PS3__)
# define UNITY_PS3 1
#elif defined(__SPU__)
# define UNITY_SPU 1
#endif
#if defined(_AMD64_) || defined(__LP64__)
# define UNITY_64 1
# define UNITY_32 0
#else
# define UNITY_64 0
# define UNITY_32 1
#endif
#ifndef SInt16_defined
# define SInt16_defined
typedef signed short SInt16;
#endif
#ifndef UInt16_defined
# define UInt16_defined
typedef unsigned short UInt16;
#endif
#ifndef UInt8_defined
# define UInt8_defined
typedef unsigned char UInt8;
#endif
#ifndef SInt8_defined
# define SInt8_defined
typedef signed char SInt8;
#endif
#if UNITY_64
# if UNITY_LINUX
# ifndef SInt32_defined
# define SInt32_defined
typedef signed int SInt32;
# endif
# ifndef UInt32_defined
# define UInt32_defined
typedef unsigned int UInt32;
# endif
# ifndef UInt64_defined
# define UInt64_defined
typedef unsigned long UInt64;
# endif
# ifndef SInt64_defined
# define SInt64_defined
typedef signed long SInt64;
# endif
# elif UNITY_OSX
# ifndef SInt32_defined
# define SInt32_defined
typedef signed int SInt32;
# endif
# ifndef UInt32_defined
# define UInt32_defined
typedef unsigned int UInt32;
# endif
# ifndef UInt64_defined
# define UInt64_defined
typedef unsigned long long UInt64;
# endif
# ifndef SInt64_defined
# define SInt64_defined
typedef signed long long SInt64;
# endif
# elif UNITY_WIN
# ifndef SInt32_defined
# define SInt32_defined
typedef signed long SInt32;
# endif
# ifndef UInt32_defined
# define UInt32_defined
typedef unsigned long UInt32;
# endif
# ifndef UInt64_defined
# define UInt64_defined
typedef unsigned long long UInt64;
# endif
# ifndef SInt64_defined
# define SInt64_defined
typedef signed long long SInt64;
# endif
#endif
#else
# ifndef SInt32_defined
# define SInt32_defined
typedef signed int SInt32;
# endif
# ifndef UInt32_defined
# define UInt32_defined
typedef unsigned int UInt32;
# endif
# ifndef UInt64_defined
# define UInt64_defined
typedef unsigned long long UInt64;
# endif
# ifndef SInt64_defined
# define SInt64_defined
typedef signed long long SInt64;
# endif
#endif
#endif
#if UNITY_WIN
#define UNITY_AUDIODSP_CALLBACK __stdcall
#elif UNITY_OSX
#define UNITY_AUDIODSP_CALLBACK
#else
#define UNITY_AUDIODSP_CALLBACK
#endif
// Attribute to make function be exported from a plugin
#if UNITY_WIN
#define UNITY_AUDIODSP_EXPORT_API __declspec(dllexport)
#else
#define UNITY_AUDIODSP_EXPORT_API
#endif
#if defined(__CYGWIN32__)
#define UNITY_AUDIODSP_CALLBACK __stdcall
#elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
#define UNITY_AUDIODSP_CALLBACK __stdcall
#elif defined(__MACH__) || defined(__ANDROID__) || defined(__linux__) || defined(__QNX__)
#define UNITY_AUDIODSP_CALLBACK
#else
#define UNITY_AUDIODSP_CALLBACK
#endif
#define UNITY_AUDIODSP_RESULT int
#if !UNITY_SPU // asserts require _exit() to be defined
#include <assert.h>
#endif
enum
{
UNITY_AUDIODSP_OK = 0,
UNITY_AUDIODSP_ERR_UNSUPPORTED = 1,
};
struct UnityAudioEffectState;
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_CreateCallback)(UnityAudioEffectState* state);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_ReleaseCallback)(UnityAudioEffectState* state);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_ResetCallback)(UnityAudioEffectState* state);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_ProcessCallback)(UnityAudioEffectState* state, float* inbuffer, float* outbuffer, unsigned int length, int inchannels, int outchannels);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_SetPositionCallback)(UnityAudioEffectState* state, unsigned int pos);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_SetFloatParameterCallback)(UnityAudioEffectState* state, int index, float value);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_GetFloatParameterCallback)(UnityAudioEffectState* state, int index, float* value, char *valuestr);
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_GetFloatBufferCallback)(UnityAudioEffectState* state, const char* name, float* buffer, int numsamples);
enum UnityAudioEffectDefinitionFlags
{
UnityAudioEffectDefinitionFlags_IsSideChainTarget = 1 << 0, // Does this effect need a side chain buffer and can it be targeted by a Send?
UnityAudioEffectDefinitionFlags_IsSpatializer = 1 << 1, // Should this plugin be inserted at sources and take over panning?
};
enum UnityAudioEffectStateFlags
{
UnityAudioEffectStateFlags_IsPlaying = 1 << 0, // Set when engine is in play mode. Also true while paused.
UnityAudioEffectStateFlags_IsPaused = 1 << 1, // Set when engine is paused mode.
UnityAudioEffectStateFlags_IsMuted = 1 << 2, // Set when effect is being muted (only available in the editor)
UnityAudioEffectStateFlags_IsSideChainTarget = 1 << 3, // Does this effect need a side chain buffer and can it be targeted by a Send?
};
// This callback can be used to override the way distance attenuation is performed on AudioSources.
// distanceIn is the distance between the source and the listener and attenuationOut is the output volume.
// attenuationIn is the volume-curve based attenuation that would have been applied by Unity if this callback were not set.
// A typical attenuation curve may look like this: *attenuationOut = 1.0f / max(1.0f, distanceIn);
// The callback may also be used to apply a secondary gain on top of the one through attenuationIn by Unity's AudioSource curve.
typedef UNITY_AUDIODSP_RESULT (UNITY_AUDIODSP_CALLBACK * UnityAudioEffect_DistanceAttenuationCallback)(UnityAudioEffectState* state, float distanceIn, float attenuationIn, float* attenuationOut);
struct UnityAudioSpatializerData
{
float listenermatrix[16]; // Matrix that transforms sourcepos into the local space of the listener
float sourcematrix[16]; // Transform matrix of audio source
float spatialblend; // Distance-controlled spatial blend
float reverbzonemix; // Reverb zone mix level parameter (and curve) on audio source
float spread; // Spread parameter of the audio source (0..360 degrees)
float stereopan; // Stereo panning parameter of the audio source (-1 = fully left, 1 = fully right)
UnityAudioEffect_DistanceAttenuationCallback distanceattenuationcallback; // The spatializer plugin may override the distance attenuation in order to influence the voice prioritization (leave this callback as NULL to use the built-in audio source attenuation curve)
};
struct UnityAudioEffectState
{
union
{
struct
{
UInt32 structsize; // Size of this struct
UInt32 samplerate; // System sample rate
UInt64 currdsptick; // Pointer to a sample counter marking the start of the current block being processed
UInt64 prevdsptick; // Used for determining when DSPs are bypassed and so sidechain info becomes invalid
float* sidechainbuffer; // Side-chain buffers to read from
void* effectdata; // Internal data for the effect
UInt32 flags; // Various flags through which information can be queried from the host
void* internal; // Internal data, do not touch!
// Version 1.0 of the plugin API only contains data up to here, so perform a state->structsize >= sizeof(UnityAudioEffectState) in your code before you
// access any of this data in order to detect whether the host API is older than the plugin.
UnityAudioSpatializerData* spatializerdata; // Data for spatializers
UInt32 dspbuffersize; // Number of frames being processed per process callback. Use this to allocate temporary buffers before processing starts.
UInt32 hostapiversion; // Version of plugin API used by host
};
unsigned char pad[80]; // This entire structure must be a multiple of 16 bytes (and an instance 16 byte aligned) for PS3 SPU DMA requirements
};
#ifdef __cplusplus
template<typename T> inline T* GetEffectData() const
{
#if !UNITY_SPU // asserts require _exit() to be defined
assert(effectdata);
assert(internal);
#endif
return (T*)effectdata;
}
#endif
};
struct UnityAudioParameterDefinition
{
char name[16]; // Display name on the GUI
char unit[16]; // Scientific unit of parameter to be appended after the value in textboxes
const char* description; // Description of parameter (displayed in tool tips, automatically generated documentation, etc.)
float min; // Minimum value of the parameter
float max; // Maximum value of the parameter
float defaultval; // Default and initial value of the parameter
float displayscale; // Scale factor used only for the display of parameters (i.e. 100 for a percentage value ranging from 0 to 1)
float displayexponent; // Exponent for mapping parameters to sliders
};
struct UnityAudioEffectDefinition
{
UInt32 structsize; // Size of this struct
UInt32 paramstructsize; // Size of paramdesc fields
UInt32 apiversion; // Plugin API version
UInt32 pluginversion; // Version of this plugin
UInt32 channels; // Number of channels. Effects should set this to 0 and process any number of input/output channels they get in the process callback. Generator elements should specify a >0 value here.
UInt32 numparameters; // The number of parameters exposed by this plugin.
UInt64 flags; // Various capabilities and requirements of the plugin.
char name[32]; // Name used for registration of the effect. This name will also be displayed in the GUI.
UnityAudioEffect_CreateCallback create; // The create callback is called when DSP unit is created and can be null.
UnityAudioEffect_ReleaseCallback release; // The release callback is called just before the plugin is freed and should free any data associated with this specific instance of the plugin. No further callbacks related to the instance will happen after this function has been called.
UnityAudioEffect_ResetCallback reset; // The reset callback is called by the user to bring back the plugin instance into its initial state. Use to avoid clicks or artifacts.
UnityAudioEffect_ProcessCallback process; // The processing callback is repeatedly called with a block of input audio to read from and an output block to write to.
UnityAudioEffect_SetPositionCallback setposition; // The position callback can be used for implementing seek operations.
UnityAudioParameterDefinition* paramdefs; // A pointer to the definitions of the parameters exposed by this plugin. This data pointed to must remain valid for the whole lifetime of the dynamic library (ideally it's static).
UnityAudioEffect_SetFloatParameterCallback setfloatparameter; // This is called whenever one of the exposed parameters is changed.
UnityAudioEffect_GetFloatParameterCallback getfloatparameter; // This is called to query parameter values.
UnityAudioEffect_GetFloatBufferCallback getfloatbuffer; // Get N samples of named buffer. Used for displaying analysis data from the runtime.
};
// This function fills in N pointers for the N effects contained in the library and returns N.
extern "C" UNITY_AUDIODSP_EXPORT_API int UnityGetAudioEffectDefinitions(UnityAudioEffectDefinition*** descptr);
AudioPluginUtil.h header:
#pragma once
#include "AudioPluginInterface.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if UNITY_WIN
# include <windows.h>
#else
# if UNITY_SPU
# include
# include "ps3/AudioPluginInterfacePS3.h"
# else
# include <pthread.h>
# endif
# define strcpy_s strcpy
#endif
typedef int (*InternalEffectDefinitionRegistrationCallback)(UnityAudioEffectDefinition& desc);
const float kMaxSampleRate = 22050.0f;
const float kPI = 3.141592653589f;
inline float FastClip(float x, float minval, float maxval) { return (fabsf(x - minval) - fabsf(x - maxval) + (minval + maxval)) * 0.5f; }
inline float FastMin(float a, float b) { return (a + b - fabsf(a - b)) * 0.5f; }
inline float FastMax(float a, float b) { return (a + b + fabsf(a - b)) * 0.5f; }
inline int FastFloor(float x) { return (int)floorf(x); } // TODO: Optimize
char* strnew(const char* src);
char* tmpstr(int index, const char* fmtstr, ...);
class UnityComplexNumber
{
public:
// No constructor because we want to be able to define this inside anonymous unions (this is also why we don't use std::complex here)
inline void Set(float _re, float _im)
{
re = _re;
im = _im;
}
inline void Set(const UnityComplexNumber& c)
{
re = c.re;
im = c.im;
}
inline static void Mul(const UnityComplexNumber& a, float b, UnityComplexNumber& result)
{
result.re = a.re * b;
result.im = a.im * b;
}
inline static void Mul(const UnityComplexNumber& a, const UnityComplexNumber& b, UnityComplexNumber& result)
{
// Store temporarily in case a or b reference the same memory as result
float t = a.re * b.im + a.im * b.re;
result.re = a.re * b.re - a.im * b.im;
result.im = t;
}
inline static void Add(const UnityComplexNumber& a, const UnityComplexNumber& b, UnityComplexNumber& result)
{
result.re = a.re + b.re;
result.im = a.im + b.im;
}
inline static void Sub(const UnityComplexNumber& a, const UnityComplexNumber& b, UnityComplexNumber& result)
{
result.re = a.re - b.re;
result.im = a.im - b.im;
}
inline const UnityComplexNumber operator *(float c) const
{
UnityComplexNumber result;
result.re = re * c;
result.im = im * c;
return result;
}
inline const UnityComplexNumber operator *(const UnityComplexNumber& c) const
{
UnityComplexNumber result;
result.re = re * c.re - im * c.im;
result.im = re * c.im + im * c.re;
return result;
}
inline const UnityComplexNumber operator +(const UnityComplexNumber& c) const
{
UnityComplexNumber result;
result.re = re + c.re;
result.im = im + c.im;
return result;
}
inline const UnityComplexNumber operator -(const UnityComplexNumber& c) const
{
UnityComplexNumber result;
result.re = re - c.re;
result.im = im - c.im;
return result;
}
inline float Magnitude() const
{
return sqrtf(re * re + im * im);
}
inline float Magnitude2() const
{
return re * re + im * im;
}
public:
float re, im;
};
class FFT
{
public:
static void Forward(UnityComplexNumber* data, int numsamples);
static void Backward(UnityComplexNumber* data, int numsamples);
};
class FFTAnalyzer : public FFT
{
public:
void Cleanup(); // Assumes zero-initialization
void AnalyzeInput(float* data, int numchannels, int numsamples, float specAlpha);
void AnalyzeOutput(float* data, int numchannels, int numsamples, float specAlpha);
void CheckInitialized();
bool CanBeRead() const;
void ReadBuffer(float* buffer, int numsamples, bool readInputBuffer);
public:
float* window;
float* ibuffer;
float* obuffer;
UnityComplexNumber* cspec;
float* ispec1;
float* ispec2;
float* ospec1;
float* ospec2;
int spectrumSize;
int numSpectraReady;
};
class HistoryBuffer
{
public:
HistoryBuffer();
~HistoryBuffer();
public:
void Init(int _length);
void ReadBuffer(float* buffer, int numsamplesTarget, int numsamplesSource, float offset);
public:
inline void Feed(float sample)
{
// Don't try to optimize this with ++
// The writeindex variable may be read at the same time, so we don't want intermediate values indexing out of the array.
int w = writeindex + 1;
if (w == length)
w = 0;
data[w] = sample;
writeindex = w;
}
public:
int length;
int writeindex;
float* data;
};
template<typename T, int _LENGTH>
class RingBuffer
{
public:
enum { LENGTH = _LENGTH };
volatile int readpos;
volatile int writepos;
T buffer[LENGTH];
inline bool Read(T& val)
{
int r = readpos;
if (r == writepos)
return false;
r = (r == LENGTH - 1) ? 0 : (r + 1);
val = buffer[r];
readpos = r;
return true;
}
inline void Skip(int num)
{
int r = readpos + num;
if (r >= LENGTH)
r -= LENGTH;
readpos = r;
}
inline void SyncWritePos()
{
writepos = readpos;
}
inline bool Feed(const T& input)
{
int w = (writepos == LENGTH - 1) ? 0 : (writepos + 1);
buffer[w] = input;
writepos = w;
return true;
}
inline int GetNumBuffered() const
{
int b = writepos - readpos;
if (b < 0)
b += LENGTH;
return b;
}
inline void Clear()
{
writepos = 0;
readpos = 0;
}
};
class BiquadFilter
{
public:
inline void SetupPeaking(float cutoff, float samplerate, float gain, float Q);
inline void SetupLowShelf(float cutoff, float samplerate, float gain, float Q);
inline void SetupHighShelf(float cutoff, float samplerate, float gain, float Q);
inline void SetupLowpass(float cutoff, float samplerate, float Q);
inline void SetupHighpass(float cutoff, float samplerate, float Q);
public:
inline float Process(float input)
{
float iir = input - a1 * z1 - a2 * z2;
float fir = b0 * iir + b1 * z1 + b2 * z2;
z2 = z1;
z1 = iir;
return fir;
}
inline void StoreCoeffs(float*& data)
{
*data++ = b2;
*data++ = b1;
*data++ = b0;
*data++ = a2;
*data++ = a1;
}
protected:
float a1, a2, b0, b1, b2;
float z1, z2;
};
// The filter coefficient formulae below are taken from Robert Bristow-Johnson's excellent EQ biquad filter cookbook:
// http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
void BiquadFilter::SetupPeaking(float cutoff, float samplerate, float gain, float Q)
{
float w0 = 2.0f * kPI * cutoff / samplerate, A = powf(10.0f, gain * 0.025f), alpha = sinf(w0) / (2.0f * Q), a0;
b0 = 1.0f + alpha * A;
b1 = -2.0f * cosf(w0);
b2 = 1.0f - alpha * A;
a0 = 1.0f + alpha / A;
a1 = -2.0f * cosf(w0);
a2 = 1.0f - alpha / A;
float inv_a0 = 1.0f / a0; a1 *= inv_a0; a2 *= inv_a0; b0 *= inv_a0; b1 *= inv_a0; b2 *= inv_a0;
}
void BiquadFilter::SetupLowShelf(float cutoff, float samplerate, float gain, float Q)
{
float w0 = 2.0f * kPI * cutoff / samplerate, A = powf(10.0f, gain * 0.025f), alpha = sinf(w0) / (2.0f * Q), a0;
b0 = A * ((A + 1.0f) - (A - 1.0f) * cosf(w0) + 2.0f * sqrtf(A) * alpha);
b1 = 2.0f * A * ((A - 1.0f) - (A + 1.0f) * cosf(w0));
b2 = A * ((A + 1.0f) - (A - 1.0f) * cosf(w0) - 2.0f * sqrtf(A) * alpha);
a0 = (A + 1.0f) + (A - 1.0f) * cosf(w0) + 2.0f * sqrtf(A) * alpha;
a1 = -2.0f * ((A - 1.0f) + (A + 1.0f) * cosf(w0));
a2 = (A + 1.0f) + (A - 1.0f) * cosf(w0) - 2.0f * sqrtf(A) * alpha;
float inv_a0 = 1.0f / a0; a1 *= inv_a0; a2 *= inv_a0; b0 *= inv_a0; b1 *= inv_a0; b2 *= inv_a0;
}
void BiquadFilter::SetupHighShelf(float cutoff, float samplerate, float gain, float Q)
{
float w0 = 2.0f * kPI * cutoff / samplerate, A = powf(10.0f, gain * 0.025f), alpha = sinf(w0) / (2.0f * Q), a0;
b0 = A * ((A + 1.0f) + (A - 1.0f) * cosf(w0) + 2.0f * sqrtf(A) * alpha);
b1 = -2.0f * A * ((A - 1.0f) + (A + 1.0f) * cosf(w0));
b2 = A * ((A + 1.0f) + (A - 1.0f) * cosf(w0) - 2.0f * sqrtf(A) * alpha);
a0 = (A + 1.0f) - (A - 1.0f) * cosf(w0) + 2.0f * sqrtf(A) * alpha;
a1 = 2.0f * ((A - 1.0f) - (A + 1.0f) * cosf(w0));
a2 = (A + 1.0f) - (A - 1.0f) * cosf(w0) - 2.0f * sqrtf(A) * alpha;
float inv_a0 = 1.0f / a0; a1 *= inv_a0; a2 *= inv_a0; b0 *= inv_a0; b1 *= inv_a0; b2 *= inv_a0;
}
void BiquadFilter::SetupLowpass(float cutoff, float samplerate, float Q)
{
float w0 = 2.0f * kPI * cutoff / samplerate, alpha = sinf(w0) / (2.0f * Q), a0;
b0 = (1.0f - cosf(w0)) * 0.5f;
b1 = 1.0f - cosf(w0);
b2 = (1.0f - cosf(w0)) * 0.5f;
a0 = 1.0f + alpha;
a1 = -2.0f * cosf(w0);
a2 = 1.0f - alpha;
float inv_a0 = 1.0f / a0; a1 *= inv_a0; a2 *= inv_a0; b0 *= inv_a0; b1 *= inv_a0; b2 *= inv_a0;
}
void BiquadFilter::SetupHighpass(float cutoff, float samplerate, float Q)
{
float w0 = 2.0f * kPI * cutoff / samplerate, alpha = sinf(w0) / (2.0f * Q), a0;
b0 = (1.0f + cosf(w0)) * 0.5f;
b1 = -(1.0f + cosf(w0));
b2 = (1.0f + cosf(w0)) * 0.5f;
a0 = 1.0f + alpha;
a1 = -2.0f * cosf(w0);
a2 = 1.0f - alpha;
float inv_a0 = 1.0f / a0; a1 *= inv_a0; a2 *= inv_a0; b0 *= inv_a0; b1 *= inv_a0; b2 *= inv_a0;
}
class StateVariableFilter
{
public:
float cutoff;
float bandwidth;
public:
inline float ProcessHPF(float input)
{
input += 1.0e-11f; // Kill denormals
lpf += cutoff * bpf;
float hpf = (input - bpf) * bandwidth - lpf;
bpf += cutoff * hpf;
lpf += cutoff * bpf;
hpf = (input - bpf) * bandwidth - lpf;
bpf += cutoff * hpf;
return hpf;
}
inline float ProcessBPF(float input)
{
ProcessHPF(input);
return bpf;
}
inline float ProcessLPF(float input)
{
ProcessHPF(input);
return lpf;
}
public:
float lpf, bpf;
};
class Random
{
public:
inline void Seed(unsigned long _seed)
{
seed = _seed;
}
inline unsigned int Get()
{
seed = (seed * 1664525 + 1013904223) & 0xFFFFFFFF;
return seed ^ (seed >> 16);
}
inline float GetFloat(float minval, float maxval)
{
return minval + (maxval - minval) * (Get() & 0xFFFFFF) * (const float)(1.0f / (float)0xFFFFFF);
}
protected:
unsigned int seed;
};
class NoiseGenerator
{
public:
void Init()
{
level = 0.0f;
delta = 0.0f;
minval = 0.0f;
maxval = 1.0f;
period = 100.0f;
invperiod = 0.01f;
samplesleft = 0;
}
inline void SetRange(float minval, float maxval)
{
this->minval = minval;
this->maxval = maxval;
}
inline void SetPeriod(float period)
{
SetPeriod(period, 1.0f / period);
}
inline void SetPeriod(float period, float invperiod)
{
this->period = period;
this->invperiod = invperiod;
}
inline float Sample(Random& random)
{
if (--samplesleft <= 0)
{
samplesleft = (int)period;
delta = (random.GetFloat(minval, maxval) - level) * invperiod;
}
level += delta;
return level;
}
public:
float level;
float delta;
float minval;
float maxval;
float period;
float invperiod;
int samplesleft;
};
class Mutex
{
public:
Mutex();
~Mutex();
public:
bool TryLock();
void Lock();
void Unlock();
protected:
#if UNITY_WIN
CRITICAL_SECTION crit_sec;
#else
# if !UNITY_SPU
pthread_mutex_t mutex;
# endif
#endif
};
class MutexScopeLock
{
public:
MutexScopeLock(Mutex& _mutex, bool condition = true) : mutex(condition ? &_mutex : NULL) { if (mutex != NULL) mutex->Lock(); }
~MutexScopeLock() { if (mutex != NULL) mutex->Unlock(); }
protected:
Mutex* mutex;
};
void RegisterParameter(
UnityAudioEffectDefinition& desc,
const char* name,
const char* unit,
float minval,
float maxval,
float defaultval,
float displayscale,
float displayexponent,
int enumvalue,
const char* description = NULL
);
void InitParametersFromDefinitions(
InternalEffectDefinitionRegistrationCallback registereffectdefcallback,
float* params
);
void DeclareEffect(
UnityAudioEffectDefinition& desc,
const char* name,
UnityAudioEffect_CreateCallback createcallback,
UnityAudioEffect_ReleaseCallback releasecallback,
UnityAudioEffect_ProcessCallback processcallback,
UnityAudioEffect_SetFloatParameterCallback setfloatparametercallback,
UnityAudioEffect_GetFloatParameterCallback getfloatparametercallback,
UnityAudioEffect_GetFloatBufferCallback getfloatbuffercallback,
InternalEffectDefinitionRegistrationCallback registereffectdefcallback
);
AudioPluginInterface.h is the core file of the audio plugin interface: it defines the low-level data structures and callbacks through which the mixer hands audio blocks and effect state to the plugin. AudioPluginUtil.h is built on top of AudioPluginInterface.h and is mainly responsible for registering and passing the effect's parameters.
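To make that division of labour concrete, here is a minimal, hypothetical processing callback built only from the types in AudioPluginInterface.h. It assumes the input and output blocks have the same channel count and simply passes the audio through unchanged; a real DSP effect would transform the samples inside the loop. A pointer to a function of this shape is what eventually goes into the process field of UnityAudioEffectDefinition.
UNITY_AUDIODSP_RESULT UNITY_AUDIODSP_CALLBACK PassThroughProcessCallback(UnityAudioEffectState* state, float* inbuffer, float* outbuffer, unsigned int length, int inchannels, int outchannels)
{
    for (unsigned int n = 0; n < length; n++)      // one iteration per sample frame
        for (int ch = 0; ch < outchannels; ch++)   // samples are interleaved per channel
            outbuffer[n * outchannels + ch] = inbuffer[n * inchannels + ch];
    return UNITY_AUDIODSP_OK;
}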
Let's start with the helper function RegisterParameter in AudioPluginUtil.h. This function is the one that matters most in practice: it registers one of the DSP effect's exposed parameters, for example a volume control. It takes ten arguments, one of which is optional (a usage sketch follows the list):
1. UnityAudioEffectDefinition& desc (reference to the UnityAudioEffectDefinition struct being filled in)
2. const char* name (display name of the parameter)
3. const char* unit (unit appended after the value, e.g. dB or %)
4. float minval (minimum value of the parameter)
5. float maxval (maximum value of the parameter)
6. float defaultval (default and initial value of the parameter)
7. float displayscale (scale factor used only when displaying the value)
8. float displayexponent (exponent used to map the parameter onto its slider)
9. int enumvalue (index of the parameter, i.e. its enum value)
10. const char* description = NULL (description of the parameter; optional and may be left NULL)
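To show how these arguments fit together, here is a minimal, hypothetical registration callback that registers a single "Volume" parameter. The enum name P_VOLUME, the dB range, and the function name InternalRegisterEffectDefinition are assumptions for this sketch; DeclareEffect (declared in AudioPluginUtil.h) expects a callback of exactly this shape.
enum Param { P_VOLUME, P_NUM };   // hypothetical parameter indices for this sketch
int InternalRegisterEffectDefinition(UnityAudioEffectDefinition& definition)
{
    definition.paramdefs = new UnityAudioParameterDefinition[P_NUM];   // storage for the parameter definitions
    RegisterParameter(definition, "Volume", "dB",                      // 2. display name, 3. unit
                      -80.0f, 20.0f, 0.0f,                             // 4. min, 5. max, 6. default
                      1.0f, 1.0f,                                      // 7. display scale, 8. display exponent
                      P_VOLUME,                                        // 9. enum value (parameter index)
                      "Overall gain applied by the effect");           // 10. optional description
    return P_NUM;                                                      // number of parameters registered
}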
UnityAudioEffectDefinition, by the way, is the struct that defines the audio effect itself; it has quite a few member fields, but essentially all of them are useful. That's it for this lesson, which covered how to set the plugin up; in the next lesson we will actually write the plugin. See you next time!