additional sound type for local brake, support for combined sounds

This commit is contained in:
tmj-fstate
2017-12-14 14:31:34 +01:00
parent 428710b8e7
commit 4e6e428cea
12 changed files with 725 additions and 162 deletions

625
sound.cpp
View File

@@ -37,41 +37,85 @@ sound_source::deserialize( std::string const &Input, sound_type const Legacytype
sound_source &
sound_source::deserialize( cParser &Input, sound_type const Legacytype, int const Legacyparameters ) {
// TODO: implement block type config parsing
switch( Legacytype ) {
case sound_type::single: {
// single sample only
m_soundmain.buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
break;
}
case sound_type::multipart: {
// three samples: start, middle, stop
m_soundbegin.buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
m_soundmain.buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
m_soundend.buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
break;
}
default: {
break;
}
}
// cache parser config, as it may change during deserialization
auto const inputautoclear { Input.autoclear() };
if( Legacyparameters & sound_parameters::range ) {
Input.getTokens( 1, false );
Input >> m_range;
Input.getTokens( 1, true, "\n\r\t ,;" );
if( Input.peek() == "{" ) {
// block type config
while( true == deserialize_mapping( Input ) ) {
; // all work done by while()
}
if( false == m_soundchunks.empty() ) {
// arrange loaded sound chunks in requested order
std::sort(
std::begin( m_soundchunks ), std::end( m_soundchunks ),
[]( soundchunk_pair const &Left, soundchunk_pair const &Right ) {
return ( Left.second.threshold < Right.second.threshold ); } );
// calculate and cache full range points for each chunk, including crossfade sections:
// on the far end the crossfade section extends to the threshold point of the next chunk...
for( std::size_t idx = 0; idx < m_soundchunks.size() - 1; ++idx ) {
m_soundchunks[ idx ].second.fadeout = m_soundchunks[ idx + 1 ].second.threshold;
}
// ...and on the other end from the threshold point back into the range of previous chunk
for( std::size_t idx = 1; idx < m_soundchunks.size(); ++idx ) {
auto const previouschunkwidth { m_soundchunks[ idx ].second.threshold - m_soundchunks[ idx - 1 ].second.threshold };
m_soundchunks[ idx ].second.fadein = m_soundchunks[ idx ].second.threshold - 0.01f * m_crossfaderange * previouschunkwidth;
}
m_soundchunks.back().second.fadeout = std::max( m_soundchunks.back().second.threshold, 100 );
// test if the chunk table contains any actual samples while at it
for( auto &soundchunk : m_soundchunks ) {
if( soundchunk.first.buffer != null_handle ) {
m_soundchunksempty = false;
break;
}
}
}
}
if( Legacyparameters & sound_parameters::amplitude ) {
Input.getTokens( 2, false );
Input
>> m_amplitudefactor
>> m_amplitudeoffset;
}
if( Legacyparameters & sound_parameters::frequency ) {
Input.getTokens( 2, false );
Input
>> m_frequencyfactor
>> m_frequencyoffset;
else {
// legacy type config
// set the parser to preserve retrieved tokens, so we don't need to mess with separately passing the initial read
Input.autoclear( false );
switch( Legacytype ) {
case sound_type::single: {
// single sample only
m_sounds[ main ].buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
break;
}
case sound_type::multipart: {
// three samples: start, middle, stop
for( auto &sound : m_sounds ) {
sound.buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
}
break;
}
default: {
break;
}
}
if( Legacyparameters & sound_parameters::range ) {
Input.getTokens( 1, false );
Input >> m_range;
}
if( Legacyparameters & sound_parameters::amplitude ) {
Input.getTokens( 2, false );
Input
>> m_amplitudefactor
>> m_amplitudeoffset;
}
if( Legacyparameters & sound_parameters::frequency ) {
Input.getTokens( 2, false );
Input
>> m_frequencyfactor
>> m_frequencyoffset;
}
}
// restore parser behaviour
Input.autoclear( inputautoclear );
return *this;
}
@@ -85,11 +129,11 @@ sound_source::deserialize_filename( cParser &Input ) {
// simple case, single file
return token;
}
// if instead of filename we've encountered '[' this marks beginning of random sound set
// we retrieve all filenames from the set, then return a random one
// if instead of filename we've encountered '[' this marks a beginning of random sounds
// we retrieve all entries, then return a random one
std::vector<std::string> filenames;
while( ( ( token = Input.getToken<std::string>( true, "\n\r\t ,;" ) ) != "" )
&& ( token != "]" ) ) {
&& ( token != "]" ) ) {
filenames.emplace_back( token );
}
if( false == filenames.empty() ) {
@@ -102,6 +146,127 @@ sound_source::deserialize_filename( cParser &Input ) {
}
}
// imports member data pair from the config file
// recognized keys: soundmain:, soundset:, soundbegin:, soundend:, soundX:/pitchX: (X = activation
// threshold of a combined-sound chunk), crossfade:, placement:, offset:, and a set of float properties
// returns: true if a [ key: value ] pair was extracted (whether recognized or not), false on block end
bool
sound_source::deserialize_mapping( cParser &Input ) {
    // token can be a key or block end
    std::string const key { Input.getToken<std::string>( true, "\n\r\t ,;[]" ) };
    if( ( true == key.empty() ) || ( key == "}" ) ) { return false; }
    // if not block end then the key is followed by assigned value or sub-block
    if( key == "soundmain:" ) {
        sound( sound_id::main ).buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
    }
    else if( key == "soundset:" ) {
        deserialize_soundset( Input );
    }
    else if( key == "soundbegin:" ) {
        sound( sound_id::begin ).buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
    }
    else if( key == "soundend:" ) {
        sound( sound_id::end ).buffer = audio::renderer.fetch_buffer( deserialize_filename( Input ) );
    }
    else if( key.compare( 0, std::min<std::size_t>( key.size(), 5 ), "sound" ) == 0 ) {
        // sound chunks, defined with key soundX where X = activation threshold
        auto const indexstart { key.find_first_of( "1234567890" ) };
        auto const indexend { key.find_first_not_of( "1234567890", indexstart ) };
        if( indexstart != std::string::npos ) {
            // NOTE: we'll sort the chunks at the end of deserialization
            m_soundchunks.emplace_back(
                soundchunk_pair {
                    // sound data
                    { audio::renderer.fetch_buffer( deserialize_filename( Input ) ), 0 },
                    // chunk data
                    { std::stoi( key.substr( indexstart, indexend - indexstart ) ), 0, 0, 1.f } } );
        }
    }
    else if( key.compare( 0, std::min<std::size_t>( key.size(), 5 ), "pitch" ) == 0 ) {
        // sound chunk pitch, defined with key pitchX where X = activation threshold
        auto const indexstart { key.find_first_of( "1234567890" ) };
        auto const indexend { key.find_first_not_of( "1234567890", indexstart ) };
        if( indexstart != std::string::npos ) {
            auto const index { std::stoi( key.substr( indexstart, indexend - indexstart ) ) };
            auto const pitch { Input.getToken<float>( false, "\n\r\t ,;" ) };
            // assign the pitch to the chunk with matching activation threshold, if present
            for( auto &chunk : m_soundchunks ) {
                if( chunk.second.threshold == index ) {
                    chunk.second.pitch = pitch;
                    break;
                }
            }
        }
    }
    else if( key == "crossfade:" ) {
        // for combined sounds, percentage of assigned range allocated to crossfade sections
        // NOTE: fixed the call which passed the separator string in place of the bool parameter,
        // where it silently converted to 'true' and dropped the custom separator list;
        // the 3-arg (count, bool, separators) form matches every other value read in this method
        Input.getTokens( 1, false, "\n\r\t ,;" );
        Input >> m_crossfaderange;
        m_crossfaderange = clamp( m_crossfaderange, 0, 100 );
    }
    else if( key == "placement:" ) {
        auto const value { Input.getToken<std::string>( true, "\n\r\t ,;" ) };
        std::map<std::string, sound_placement> const placements {
            { "internal", sound_placement::internal },
            { "engine", sound_placement::engine },
            { "external", sound_placement::external },
            { "general", sound_placement::general } };
        auto lookup{ placements.find( value ) };
        if( lookup != placements.end() ) {
            m_placement = lookup->second;
        }
        // unrecognized placement values leave m_placement unchanged
    }
    else if( key == "offset:" ) {
        // point in 3d space, in format [ x, y, z ]
        Input.getTokens( 3, false, "\n\r\t ,;[]" );
        Input
            >> m_offset.x
            >> m_offset.y
            >> m_offset.z;
    }
    else {
        // floating point properties, looked up by exact key match
        std::map<std::string, float &> const properties {
            { "frequencyfactor:", m_frequencyfactor },
            { "frequencyoffset:", m_frequencyoffset },
            { "amplitudefactor:", m_amplitudefactor },
            { "amplitudeoffset:", m_amplitudeoffset },
            { "range:", m_range } };
        auto lookup { properties.find( key ) };
        if( lookup != properties.end() ) {
            Input.getTokens( 1, false, "\n\r\t ,;" );
            Input >> lookup->second;
        }
    }
    return true; // return value marks a [ key: value ] pair was extracted, nothing about whether it's recognized
}
// imports values for initial, main and ending sounds from provided data stream
// accepts either a basic "begin|main|end" filename triplet, or a [ ... ] list of
// such triplets from which one is picked at random
void
sound_source::deserialize_soundset( cParser &Input ) {
    auto token { Input.getToken<std::string>( true, "\n\r\t ,;|" ) };
    if( token != "[" ) {
        // simple case, basic set of three filenames separated with |
        // three samples: start, middle, stop
        sound( sound_id::begin ).buffer = audio::renderer.fetch_buffer( token );
        sound( sound_id::main ).buffer = audio::renderer.fetch_buffer( Input.getToken<std::string>( true, "\n\r\t ,;|" ) );
        sound( sound_id::end ).buffer = audio::renderer.fetch_buffer( Input.getToken<std::string>( true, "\n\r\t ,;|" ) );
        return;
    }
    // if instead of filename we've encountered '[' this marks a beginning of random sets
    // we retrieve all entries, then process a random one
    std::vector<std::string> soundsets;
    while( ( ( token = Input.getToken<std::string>( true, "\n\r\t ,;" ) ) != "" )
        && ( token != "]" ) ) {
        soundsets.emplace_back( token );
    }
    if( false == soundsets.empty() ) {
        // shuffle and take the front entry == pick a random one
        std::shuffle( std::begin( soundsets ), std::end( soundsets ), Global::random_engine );
        // NOTE: a temporary can't legally bind to the non-const reference parameter of the
        // recursive call (it only compiled as a compiler extension), so use a named parser
        cParser setparser( soundsets.front() );
        deserialize_soundset( setparser );
    }
}
// issues contextual play commands for the audio renderer
void
sound_source::play( int const Flags ) {
@@ -112,9 +277,11 @@ sound_source::play( int const Flags ) {
return;
}
if( m_range > 0 ) {
auto const cutoffrange{ m_range * 5 };
if( glm::length2( location() - glm::dvec3{ Global::pCameraPosition } ) > std::min( 2750.f * 2750.f, cutoffrange * cutoffrange ) ) {
// drop sounds from beyond sensible and/or audible range
auto const cutoffrange { m_range * 5 };
if( glm::length2( location() - glm::dvec3 { Global::pCameraPosition } ) > std::min( 2750.f * 2750.f, cutoffrange * cutoffrange ) ) {
// while we drop sounds from beyond sensible and/or audible range
// we act as if it was activated normally, meaning no need to include the opening bookend in subsequent calls
m_playbeginning = false;
return;
}
}
@@ -126,40 +293,106 @@ sound_source::play( int const Flags ) {
m_flags = Flags;
if( sound( sound_id::main ).buffer != null_handle ) {
// basic variant: single main sound, with optional bookends
play_basic();
}
else {
// combined variant, main sound consists of multiple chunks, with optional bookends
play_combined();
}
}
void
sound_source::play_basic() {
if( false == is_playing() ) {
// dispatch appropriate sound
// TODO: support for parameter-driven sound table
if( m_soundbegin.buffer != null_handle ) {
std::vector<audio::buffer_handle> bufferlist { m_soundbegin.buffer, m_soundmain.buffer };
insert( std::begin( bufferlist ), std::end( bufferlist ) );
if( ( true == m_playbeginning )
&& ( sound( sound_id::begin ).buffer != null_handle ) ) {
std::vector<sound_id> sounds { sound_id::begin, sound_id::main };
insert( std::begin( sounds ), std::end( sounds ) );
m_playbeginning = false;
}
else {
insert( m_soundmain.buffer );
insert( sound_id::main );
}
}
else {
if( ( m_soundbegin.buffer == null_handle )
// for single part non-looping samples we allow spawning multiple instances, if not prevented by set flags
if( ( sound( sound_id::begin ).buffer == null_handle )
&& ( ( m_flags & ( sound_flags::exclusive | sound_flags::looping ) ) == 0 ) ) {
// for single part non-looping samples we allow spawning multiple instances, if not prevented by set flags
insert( m_soundmain.buffer );
insert( sound_id::main );
}
}
}
// issues play commands for the table-driven variant of the emitter:
// the main sound consists of multiple chunks with optional begin/end bookends;
// only the chunks whose range (including crossfade margins) covers the current
// value of the controlling variable get activated
void
sound_source::play_combined() {
// combined sound consists of table of samples, each sample associated with certain range of values of controlling variable
// current value of the controlling variable is passed to the source with pitch() call
auto const soundpoint { clamp( m_properties.pitch * 100.f, 0.f, 100.f ) };
// NOTE: the chunks are assumed sorted by ascending activation threshold (arranged so during deserialization)
for( std::uint32_t idx = 0; idx < m_soundchunks.size(); ++idx ) {
auto const &soundchunk { m_soundchunks[ idx ] };
// a chunk covers range from fade in point, where it starts rising in volume over crossfade distance,
// lasts until fadeout - crossfade distance point, past which it grows quiet until fade out point where it ends
// with sorted chunks, once the sound point falls below a fade in point no later chunk can cover it either
if( soundpoint < soundchunk.second.fadein ) { break; }
if( soundpoint > soundchunk.second.fadeout ) { continue; }
if( ( soundchunk.first.playing > 0 )
|| ( soundchunk.first.buffer == null_handle ) ) {
// combined sounds only play looped, single copy of each activated chunk
continue;
}
if( idx > 0 ) {
// non-initial chunks are queued directly; the chunk index is encoded in the handle's low bits
insert( sound_id::chunk | idx );
}
else {
// initial chunk requires some safety checks if the optional bookend is present,
// so we don't queue another instance while the bookend is still playing
if( sound( sound_id::begin ).buffer == null_handle ) {
// no bookend, safe to play the chunk
insert( sound_id::chunk | idx );
}
else {
// branches:
// beginning requested, not playing; queue beginning and chunk
// beginning not requested, not playing; queue chunk
// otherwise skip, one instance is already in the audio queue
if( sound( sound_id::begin ).playing == 0 ) {
if( true == m_playbeginning ) {
std::vector<sound_handle> sounds{ sound_id::begin, sound_id::chunk | idx };
insert( std::begin( sounds ), std::end( sounds ) );
m_playbeginning = false;
}
else {
insert( sound_id::chunk | idx );
}
}
}
}
}
}
// stops currently active play commands controlled by this emitter
void
sound_source::stop() {
sound_source::stop( bool const Skipend ) {
// if the source was stopped on simulation side, we should play the opening bookend next time it's activated
m_playbeginning = true;
if( false == is_playing() ) { return; }
m_stop = true;
if( ( m_soundend.buffer != null_handle )
&& ( m_soundend.buffer != m_soundmain.buffer ) // end == main can happen in malformed legacy cases
&& ( m_soundend.playing == 0 ) ) {
if( ( false == Skipend )
&& ( sound( sound_id::end ).buffer != null_handle )
&& ( sound( sound_id::end ).buffer != sound( sound_id::main ).buffer ) // end == main can happen in malformed legacy cases
&& ( sound( sound_id::end ).playing == 0 ) ) {
// spawn potentially defined sound end sample, if the emitter is currently active
insert( m_soundend.buffer );
insert( sound_id::end );
}
}
@@ -167,48 +400,64 @@ sound_source::stop() {
// updates the state of the provided audio renderer source, dispatching to the
// handler matching the emitter variant
void
sound_source::update( audio::openal_source &Source ) {
    // a present main sample marks the basic variant (single main sound with optional bookends);
    // failing that, a populated chunk table marks the combined, multi-chunk variant
    if( sound( sound_id::main ).buffer != null_handle ) {
        update_basic( Source );
    }
    else if( false == m_soundchunksempty ) {
        update_combined( Source );
    }
    // otherwise the emitter has nothing to update
}
void
sound_source::update_basic( audio::openal_source &Source ) {
if( true == Source.is_playing ) {
if( ( true == m_stop )
&& ( Source.buffers[ Source.buffer_index ] != m_soundend.buffer ) ) {
&& ( Source.sounds[ Source.sound_index ] != sound_id::end ) ) {
// kill the sound if stop was requested, unless it's sound bookend sample
Source.stop();
update_counter( Source.buffers[ Source.buffer_index ], -1 );
update_counter( Source.sounds[ Source.sound_index ], -1 );
if( false == is_playing() ) {
m_stop = false;
}
return;
}
if( m_soundbegin.buffer != null_handle ) {
if( sound( sound_id::begin ).buffer != null_handle ) {
// potentially a multipart sound
// detect the moment when the sound moves from startup sample to the main
auto const soundhandle { Source.sounds[ Source.sound_index ] };
if( ( false == Source.is_looping )
&& ( Source.buffers[ Source.buffer_index ] == m_soundmain.buffer ) ) {
// when it happens update active sample flags, and activate the looping
&& ( soundhandle == sound_id::main ) ) {
// when it happens update active sample counters, and activate the looping
update_counter( sound_id::begin, -1 );
update_counter( soundhandle, 1 );
Source.loop( true );
--( m_soundbegin.playing );
++( m_soundmain.playing );
}
}
// check and update if needed current sound properties
update_location();
update_soundproofing();
Source.sync_with( m_properties );
if( false == Source.is_synced ) {
if( Source.sync != sync_state::good ) {
// if the sync went wrong we let the renderer kill its part of the emitter, and update our playcounter(s) to match
update_counter( Source.buffers[ Source.buffer_index ], -1 );
update_counter( Source.sounds[ Source.sound_index ], -1 );
}
}
else {
// if the emitter isn't playing it's either done or wasn't yet started
// we can determine this from number of processed buffers
if( Source.buffer_index != Source.buffers.size() ) {
auto const buffer { Source.buffers[ Source.buffer_index ] };
if( Source.sound_index != Source.sounds.size() ) {
// the emitter wasn't yet started
auto const soundhandle { Source.sounds[ Source.sound_index ] };
// emitter initialization
if( ( buffer == m_soundmain.buffer )
if( ( soundhandle == sound_id::main )
&& ( true == TestFlag( m_flags, sound_flags::looping ) ) ) {
// main sample can be optionally set to loop
Source.loop( true );
@@ -218,22 +467,208 @@ sound_source::update( audio::openal_source &Source ) {
update_location();
update_soundproofing();
Source.sync_with( m_properties );
if( true == Source.is_synced ) {
if( Source.sync == sync_state::good ) {
// all set, start playback
Source.play();
if( false == Source.is_playing ) {
// if the playback didn't start update the state counter
update_counter( buffer, -1 );
update_counter( soundhandle, -1 );
}
}
else {
// if the initial sync went wrong we skip the activation so the renderer can clean the emitter on its end
update_counter( buffer, -1 );
update_counter( soundhandle, -1 );
}
}
else {
auto const buffer { Source.buffers[ Source.buffer_index - 1 ] };
update_counter( buffer, -1 );
// the emitter is either all done or was terminated early
update_counter( Source.sounds[ Source.sound_index - 1 ], -1 );
}
}
}
// updates the combined (table-driven) variant of the emitter for the given renderer source:
// handles stop requests, the begin-bookend to looped-chunk transition, deactivation of chunks
// whose range no longer covers the controlling variable, per-chunk pitch/gain adjustment,
// and delayed activation of sources which weren't started yet
void
sound_source::update_combined( audio::openal_source &Source ) {
if( true == Source.is_playing ) {
auto const soundhandle { Source.sounds[ Source.sound_index ] };
if( ( true == m_stop )
&& ( soundhandle != sound_id::end ) ) {
// kill the sound if stop was requested, unless it's sound bookend sample
Source.stop();
update_counter( soundhandle, -1 );
if( false == is_playing() ) {
m_stop = false;
}
return;
}
if( sound( sound_id::begin ).buffer != null_handle ) {
// potentially a multipart sound
// detect the moment when the sound moves from startup sample to the main
// NOTE(review): this inner declaration shadows the outer soundhandle holding the same value -- consider reusing it
auto const soundhandle { Source.sounds[ Source.sound_index ] };
if( ( false == Source.is_looping )
&& ( soundhandle == ( sound_id::chunk | 0 ) ) ) {
// when it happens update active sample counters, and activate the looping
update_counter( sound_id::begin, -1 );
update_counter( soundhandle, 1 );
Source.loop( true );
}
}
if( ( soundhandle & sound_id::chunk ) != 0 ) {
// for sound chunks, test whether the chunk should still be active given current value of the controlling variable
auto const soundpoint { clamp( m_properties.pitch * 100.f, 0.f, 100.f ) };
auto const &soundchunk { m_soundchunks[ soundhandle ^ sound_id::chunk ] };
if( ( soundpoint < soundchunk.second.fadein )
|| ( soundpoint > soundchunk.second.fadeout ) ) {
// the controlling variable left this chunk's range; retire the chunk
Source.stop();
update_counter( soundhandle, -1 );
return;
}
}
// check and update if needed current sound properties
update_location();
update_soundproofing();
// pitch and volume are adjusted on per-chunk basis
// since they're relative to base values, backup these...
auto const baseproperties = m_properties;
// ...adjust per-chunk parameters...
update_crossfade( soundhandle );
// ... pass the parameters to the audio renderer...
Source.sync_with( m_properties );
if( Source.sync != sync_state::good ) {
// if the sync went wrong we let the renderer kill its part of the emitter, and update our playcounter(s) to match
update_counter( Source.sounds[ Source.sound_index ], -1 );
}
// ...and restore base properties
m_properties = baseproperties;
}
else {
// if the emitter isn't playing it's either done or wasn't yet started
// we can determine this from number of processed buffers
if( Source.sound_index != Source.sounds.size() ) {
// the emitter wasn't yet started
auto const soundhandle { Source.sounds[ Source.sound_index ] };
// emitter initialization
// bookend samples are excluded from looping; chunk samples loop when the flag is set
if( ( soundhandle != sound_id::begin )
&& ( soundhandle != sound_id::end )
&& ( true == TestFlag( m_flags, sound_flags::looping ) ) ) {
// main sample can be optionally set to loop
Source.loop( true );
}
Source.range( m_range );
Source.pitch( m_pitchvariation );
update_location();
update_soundproofing();
// pitch and volume are adjusted on per-chunk basis
auto const baseproperties = m_properties;
update_crossfade( soundhandle );
Source.sync_with( m_properties );
if( Source.sync == sync_state::good ) {
// all set, start playback
Source.play();
if( false == Source.is_playing ) {
// if the playback didn't start update the state counter
update_counter( soundhandle, -1 );
}
}
else {
// if the initial sync went wrong we skip the activation so the renderer can clean the emitter on its end
update_counter( soundhandle, -1 );
}
// restore base sound properties after the per-chunk adjustment
m_properties = baseproperties;
}
else {
// the emitter is either all done or was terminated early
update_counter( Source.sounds[ Source.sound_index - 1 ], -1 );
}
}
}
// adjusts the current sound properties to account for the crossfade sections of the specified chunk:
// scales relative pitch towards the neighbouring chunks' pitch, and attenuates gain inside
// the chunk's fadein/fadeout margins
void
sound_source::update_crossfade( sound_handle const Chunk ) {
if( ( Chunk & sound_id::chunk ) == 0 ) {
// bookend sounds are played at their base pitch
m_properties.pitch = 1.f;
return;
}
// map the controlling variable (carried in the pitch property) to the 0-100 threshold scale
auto const soundpoint { clamp( m_properties.pitch * 100.f, 0.f, 100.f ) };
// NOTE: direct access to implementation details ahead, kinda fugly
auto const chunkindex { Chunk ^ sound_id::chunk };
auto const &chunkdata { m_soundchunks[ chunkindex ].second };
// relative pitch adjustment
// pitch of each chunk is modified based on ratio of the chunk's pitch to that of its neighbour
if( soundpoint < chunkdata.threshold ) {
// interpolate between the pitch of previous chunk and this chunk's base pitch,
// based on how far the current soundpoint is in the range of previous chunk
// NOTE(review): with chunkindex == 0 this would index m_soundchunks[-1]; presumably the first
// chunk's fadein/threshold arrangement keeps soundpoint at or above its threshold -- TODO confirm
auto const &previouschunkdata{ m_soundchunks[ chunkindex - 1 ].second };
m_properties.pitch =
interpolate(
previouschunkdata.pitch / chunkdata.pitch,
1.f,
clamp(
( soundpoint - previouschunkdata.threshold ) / ( chunkdata.threshold - previouschunkdata.threshold ),
0.f, 1.f ) );
}
else {
if( chunkindex < ( m_soundchunks.size() - 1 ) ) {
// interpolate between this chunk's base pitch and the pitch of next chunk
// based on how far the current soundpoint is in the range of this chunk
auto const &nextchunkdata { m_soundchunks[ chunkindex + 1 ].second };
m_properties.pitch =
interpolate(
1.f,
nextchunkdata.pitch / chunkdata.pitch,
clamp(
( soundpoint - chunkdata.threshold ) / ( nextchunkdata.threshold - chunkdata.threshold ),
0.f, 1.f ) );
}
else {
// pitch of the last (or the only) chunk remains fixed throughout
m_properties.pitch = 1.f;
}
}
// if there's no crossfade sections, our work is done
if( m_crossfaderange == 0 ) { return; }
if( chunkindex > 0 ) {
// chunks other than the first can have fadein
auto const fadeinwidth { chunkdata.threshold - chunkdata.fadein };
if( soundpoint < chunkdata.threshold ) {
// inside the fadein margin: gain rises linearly from 0 at the fadein point to full at the threshold
m_properties.gain *=
interpolate(
0.f, 1.f,
clamp(
( soundpoint - chunkdata.fadein ) / fadeinwidth,
0.f, 1.f ) );
return;
}
}
if( chunkindex < ( m_soundchunks.size() - 1 ) ) {
// chunks other than the last can have fadeout
// TODO: cache widths in the chunk data struct?
// fadeout point of this chunk and activation threshold of the next are the same
// fadein range of the next chunk and the fadeout of the processed one are the same
auto const fadeoutwidth { chunkdata.fadeout - m_soundchunks[ chunkindex + 1 ].second.fadein };
auto const fadeoutstart { chunkdata.fadeout - fadeoutwidth };
if( soundpoint > fadeoutstart ) {
// inside the fadeout margin: gain falls linearly from full to 0 at the fadeout point
m_properties.gain *=
interpolate(
1.f, 0.f,
clamp(
( soundpoint - fadeoutstart ) / fadeoutwidth,
0.f, 1.f ) );
return;
}
}
}
@@ -257,7 +692,7 @@ sound_source::gain() const {
sound_source &
sound_source::pitch( float const Pitch ) {
    // store the value unclamped; for combined sounds pitch() carries the controlling variable
    // which downstream code maps onto the 0-100 chunk threshold scale
    // NOTE: removed the earlier clamped assignment -- a dead store immediately overwritten below
    m_properties.pitch = Pitch;
    return *this;
}
@@ -265,15 +700,25 @@ bool
sound_source::empty() const {
    // the source is empty if it has neither a main sample nor any populated chunk in the
    // combined-sound table; bookends alone aren't played without a main part
    // NOTE: removed the unreachable legacy return which referenced the retired m_soundmain
    // member and ignored the chunk table despite the TODO
    return ( ( sound( sound_id::main ).buffer == null_handle ) && ( m_soundchunksempty ) );
}
// returns true if the source is emitting any sound
// NOTE: removed the unreachable legacy early return (diff residue) which referenced
// retired members and made the chunk-table scan below dead code
// TODO(review): Includesoundends is currently unused -- presumably it should add the
// end bookend's play counter to the test; confirm intended semantics
bool
sound_source::is_playing( bool const Includesoundends ) const {
    auto isplaying { ( sound( sound_id::begin ).playing + sound( sound_id::main ).playing ) > 0 };
    if( ( false == isplaying )
     && ( false == m_soundchunks.empty() ) ) {
        // for emitters with sample tables check also if any of the chunks is active
        for( auto const &soundchunk : m_soundchunks ) {
            if( soundchunk.first.playing > 0 ) {
                isplaying = true;
                break; // one will do
            }
        }
    }
    return isplaying;
}
// returns location of the sound source in simulation region space
@@ -293,12 +738,10 @@ sound_source::location() const {
}
void
sound_source::update_counter( audio::buffer_handle const Buffer, int const Value ) {
sound_source::update_counter( sound_handle const Sound, int const Value ) {
if( Buffer == m_soundbegin.buffer ) { m_soundbegin.playing += Value; }
// TODO: take ito accound sample table for combined sounds
else if( Buffer == m_soundmain.buffer ) { m_soundmain.playing += Value; }
else if( Buffer == m_soundend.buffer ) { m_soundend.playing += Value; }
sound( Sound ).playing += Value;
assert( sound( Sound ).playing >= 0 );
}
void
@@ -380,10 +823,28 @@ sound_source::update_soundproofing() {
}
void
sound_source::insert( audio::buffer_handle Buffer ) {
sound_source::insert( sound_handle const Sound ) {
std::vector<audio::buffer_handle> buffers { Buffer };
return insert( std::begin( buffers ), std::end( buffers ) );
std::vector<sound_handle> sounds { Sound };
return insert( std::begin( sounds ), std::end( sounds ) );
}
sound_source::sound_data &
sound_source::sound( sound_handle const Sound ) {
    // handles carrying the chunk flag index the combined-sound chunk table with their
    // low bits; plain handles index the basic sound array directly
    if( ( Sound & sound_id::chunk ) == sound_id::chunk ) {
        return m_soundchunks[ Sound ^ sound_id::chunk ].first;
    }
    return m_sounds[ Sound ];
}
sound_source::sound_data const &
sound_source::sound( sound_handle const Sound ) const {
    // const accessor: chunk-flagged handles map to the chunk table entry selected by
    // the handle's low bits, remaining handles map to the basic sound array
    if( ( Sound & sound_id::chunk ) == sound_id::chunk ) {
        return m_soundchunks[ Sound ^ sound_id::chunk ].first;
    }
    return m_sounds[ Sound ];
}
//---------------------------------------------------------------------------