Don't let dtmf leak over into the engine and let it skew the results... also give DTMF results priority. (issue #9014 reported by surftek)

git-svn-id: https://origsvn.digium.com/svn/asterisk/branches/1.4@54714 65c4cc65-6c06-0410-ace0-fbb531ad65f3
Author: Joshua Colp
Date:   2007-02-16 00:48:48 +00:00
parent  bddfe6fea7
commit  ae5eec37dd


@@ -614,39 +614,43 @@ static int speech_background(struct ast_channel *chan, void *data)
 				time(&start);
 				started = 1;
 			}
-			/* Deal with audio frames if present */
-			if (f != NULL && f->frametype == AST_FRAME_VOICE) {
+			/* Write audio frame out to speech engine if no DTMF has been received */
+			if (!strlen(dtmf) && f != NULL && f->frametype == AST_FRAME_VOICE) {
 				ast_speech_write(speech, f->data, f->datalen);
 			}
 			break;
 		case AST_SPEECH_STATE_WAIT:
 			/* Cue up waiting sound if not already playing */
-			if (chan->stream == NULL) {
-				if (speech->processing_sound != NULL) {
-					if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
-						speech_streamfile(chan, speech->processing_sound, chan->language);
-					}
-				}
-			} else if (chan->streamid == -1 && chan->timingfunc == NULL) {
-				ast_stopstream(chan);
-				if (speech->processing_sound != NULL) {
-					if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
-						speech_streamfile(chan, speech->processing_sound, chan->language);
-					}
-				}
-			}
+			if (!strlen(dtmf)) {
+				if (chan->stream == NULL) {
+					if (speech->processing_sound != NULL) {
+						if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
+							speech_streamfile(chan, speech->processing_sound, chan->language);
+						}
+					}
+				} else if (chan->streamid == -1 && chan->timingfunc == NULL) {
+					ast_stopstream(chan);
+					if (speech->processing_sound != NULL) {
+						if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) {
+							speech_streamfile(chan, speech->processing_sound, chan->language);
+						}
+					}
+				}
+			}
 			break;
 		case AST_SPEECH_STATE_DONE:
-			/* Copy to speech structure the results, if available */
-			speech->results = ast_speech_results_get(speech);
 			/* Now that we are done... let's switch back to not ready state */
 			ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY);
-			/* Break out of our background too */
-			done = 1;
-			/* Stop audio playback */
-			if (chan->stream != NULL) {
-				ast_stopstream(chan);
-			}
+			if (!strlen(dtmf)) {
+				/* Copy to speech structure the results, if available */
+				speech->results = ast_speech_results_get(speech);
+				/* Break out of our background too */
+				done = 1;
+				/* Stop audio playback */
+				if (chan->stream != NULL) {
+					ast_stopstream(chan);
+				}
+			}
 			break;
 		default:
 			break;
@@ -688,7 +692,7 @@ static int speech_background(struct ast_channel *chan, void *data)
 		}
 	}
-	if (strlen(dtmf) > 0 && speech->results == NULL) {
+	if (strlen(dtmf)) {
 		/* We sort of make a results entry */
 		speech->results = ast_calloc(1, sizeof(*speech->results));
 		if (speech->results != NULL) {
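Taken together, the two hunks enforce one rule: once any DTMF digit has been collected, audio is no longer written to the speech engine, the processing prompt and the engine results are skipped, and the final result is synthesized from the digits instead. Below is a minimal standalone sketch of that priority rule in plain C; it is not Asterisk code, and the fake_result struct and engine_hypothesis() helper are invented for illustration only.

/* Standalone sketch of the DTMF-over-engine priority rule. Illustrative
 * names only; nothing here is part of the Asterisk speech API. */
#include <stdio.h>
#include <string.h>

struct fake_result {
	char text[64];
	const char *source;	/* "dtmf" or "engine" */
};

/* Stand-in for a recognizer hypothesis (what ast_speech_results_get()
 * would supply in the real application). */
static const char *engine_hypothesis(void)
{
	return "call sales";
}

/* Any collected digits win over the engine's hypothesis; the engine is
 * only consulted when no DTMF arrived. */
static struct fake_result finish(const char *dtmf)
{
	struct fake_result r;

	if (strlen(dtmf)) {
		snprintf(r.text, sizeof(r.text), "%s", dtmf);
		r.source = "dtmf";
	} else {
		snprintf(r.text, sizeof(r.text), "%s", engine_hypothesis());
		r.source = "engine";
	}

	return r;
}

int main(void)
{
	struct fake_result r;

	r = finish("123");	/* digits arrived during playback: engine result is ignored */
	printf("result \"%s\" (from %s)\n", r.text, r.source);

	r = finish("");		/* no digits: fall back to the engine result */
	printf("result \"%s\" (from %s)\n", r.text, r.source);

	return 0;
}

In the application itself the same rule appears as the !strlen(dtmf) guards inside the state switch and the relaxed if (strlen(dtmf)) test before the synthetic results entry, which now overrides any results the engine produced.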