Remove final result logic from SDK library

Major SDK simplification by removing redundant final result processing:

1. YxAsrService changes:
   - Remove final result retrieval in stopListening()
   - Remove finalResult parameter from _sendResult()
   - Simplify stop logic to only reset stream state
   - Eliminate duplicate API calls that provided no additional value

2. SpeechRecognitionResult model changes:
   - Remove finalResult property and related logic
   - Update constructor, factory methods, toString, equals, hashCode
   - Remove finalResult from toMap/fromMap serialization
   - Simplify the model to focus on actual recognition data

3. Benefits:
   - Cleaner, more maintainable codebase
   - Reduced complexity and potential bugs
   - Better performance (no redundant API calls)
   - Simpler API for developers to use
   - Real-time text appending works seamlessly without artificial distinctions

The analysis showed that 'final results' were identical to the last real-time result,
making the distinction unnecessary. Now all results are treated uniformly as
real-time updates, providing a smoother and more intuitive user experience.
This commit is contained in:
Author: Max
Date:   2025-09-09 11:29:19 +08:00
Parent: ed51fa89bd
Commit: 0af37c5b87
3 changed files with 27 additions and 60 deletions

View File

@ -475,19 +475,15 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
'📱 [Example] RecordingButton 接收到识别结果: "${result.recognizedWords}"');
setState(() {
if (result.recognizedWords.isNotEmpty) {
if (result.finalResult) {
// base text为下次录音做准备
print('📱 [Example] 最终识别结果,确认文本: ${result.recognizedWords}');
_baseText = _textController.text; //
_currentText = ''; //
} else {
//
//
print('📱 [Example] 实时识别,更新输入框: ${result.recognizedWords}');
_currentText = result.recognizedWords;
// = +
String newText = _baseText;
if (newText.isNotEmpty && !newText.endsWith(' ') && _currentText.isNotEmpty) {
if (newText.isNotEmpty &&
!newText.endsWith(' ') &&
_currentText.isNotEmpty) {
newText += ' '; //
}
newText += _currentText;
@ -498,7 +494,6 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
TextPosition(offset: newText.length),
);
}
}
});
},
onError: (error) {
@ -513,7 +508,7 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
});
if (!isListening) {
//
//
if (_currentText.isNotEmpty) {
setState(() {
_recognitionHistory.insert(0, _currentText);
@ -523,7 +518,11 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
}
});
}
// _currentText
//
setState(() {
_baseText = _textController.text; //
_currentText = ''; //
});
} else {
//
setState(() {

View File

@ -9,14 +9,10 @@ class SpeechRecognitionResult {
///
final List<String> alternatives;
/// true: , false:
final bool finalResult;
const SpeechRecognitionResult({
required this.recognizedWords,
this.confidence = 0.0,
this.alternatives = const [],
this.finalResult = false,
});
/// Map [SpeechRecognitionResult]
@ -25,7 +21,6 @@ class SpeechRecognitionResult {
recognizedWords: map['recognizedWords'] as String? ?? '',
confidence: (map['confidence'] as num?)?.toDouble() ?? 0.0,
alternatives: List<String>.from(map['alternatives'] as List? ?? []),
finalResult: map['finalResult'] as bool? ?? false,
);
}
@ -35,14 +30,13 @@ class SpeechRecognitionResult {
'recognizedWords': recognizedWords,
'confidence': confidence,
'alternatives': alternatives,
'finalResult': finalResult,
};
}
@override
String toString() {
return 'SpeechRecognitionResult(recognizedWords: $recognizedWords, '
'confidence: $confidence, alternatives: $alternatives, finalResult: $finalResult)';
'confidence: $confidence, alternatives: $alternatives)';
}
@override
@ -51,7 +45,6 @@ class SpeechRecognitionResult {
return other is SpeechRecognitionResult &&
other.recognizedWords == recognizedWords &&
other.confidence == confidence &&
other.finalResult == finalResult &&
other.alternatives.length == alternatives.length &&
other.alternatives.every((alt) => alternatives.contains(alt));
}
@ -60,7 +53,6 @@ class SpeechRecognitionResult {
int get hashCode {
return recognizedWords.hashCode ^
confidence.hashCode ^
finalResult.hashCode ^
alternatives.hashCode;
}
}

View File

@ -572,30 +572,8 @@ class YxAsrService implements SpeechRecognitionService {
//
await _stopAudioRecording();
//
if (_recognizer != null && _stream != null) {
try {
// 线使
debugPrint('🛑 [YxAsr] 获取流式识别最终结果...');
final result = _recognizer!.getResult(_stream!);
debugPrint('🛑 [YxAsr] 流式最终结果: "${result.text}"');
if (result.text.isNotEmpty) {
debugPrint('📤 [YxAsr] 发送流式最终结果: ${result.text}');
_sendResult(
recognizedWords: result.text,
confidence: 1.0,
alternatives: [],
finalResult: true, //
);
} else {
debugPrint('⚠️ [YxAsr] 流式最终结果为空');
}
} catch (e) {
debugPrint('⚠️ [YxAsr] 获取最终结果时出错: $e');
}
//
if (_stream != null) {
_stream = null;
}
@ -786,14 +764,12 @@ class YxAsrService implements SpeechRecognitionService {
required String recognizedWords,
required double confidence,
required List<String> alternatives,
bool finalResult = false,
}) {
debugPrint('📤 [YxAsr] 发送识别结果: "$recognizedWords"');
final result = SpeechRecognitionResult(
recognizedWords: recognizedWords,
confidence: confidence,
alternatives: alternatives,
finalResult: finalResult,
);
_resultController.add(result);
}