Remove final result logic from SDK library
Major SDK simplification by removing redundant final result processing:

1. YxAsrService changes:
   - Remove final result retrieval in stopListening()
   - Remove finalResult parameter from _sendResult()
   - Simplify stop logic to only reset stream state
   - Eliminate duplicate API calls that provided no additional value

2. SpeechRecognitionResult model changes:
   - Remove finalResult property and related logic
   - Update constructor, factory methods, toString, equals, hashCode
   - Remove finalResult from toMap/fromMap serialization
   - Simplify the model to focus on actual recognition data

3. Benefits:
   - Cleaner, more maintainable codebase
   - Reduced complexity and fewer potential bugs
   - Better performance (no redundant API calls)
   - Simpler API for developers to use
   - Real-time text appending works seamlessly without artificial distinctions

Analysis showed that "final results" were identical to the last real-time result, making the distinction unnecessary. All results are now treated uniformly as real-time updates, providing a smoother and more intuitive user experience.
This commit is contained in:
parent
ed51fa89bd
commit
0af37c5b87
|
|
@ -41,7 +41,7 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
|
|||
String _currentText = '';
|
||||
String _errorMessage = '';
|
||||
List<String> _recognitionHistory = [];
|
||||
|
||||
|
||||
/// 本次录音开始前的文本内容(用于实时追加)
|
||||
String _baseText = '';
|
||||
|
||||
|
|
@ -475,29 +475,24 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
|
|||
'📱 [Example] RecordingButton 接收到识别结果: "${result.recognizedWords}"');
|
||||
setState(() {
|
||||
if (result.recognizedWords.isNotEmpty) {
|
||||
if (result.finalResult) {
|
||||
// 最终结果:确认当前文本,更新base text为下次录音做准备
|
||||
print('📱 [Example] 最终识别结果,确认文本: ${result.recognizedWords}');
|
||||
_baseText = _textController.text; // 保存当前文本作为下次的基础
|
||||
_currentText = ''; // 清空当前识别文本
|
||||
} else {
|
||||
// 实时结果:实时更新到输入框
|
||||
print('📱 [Example] 实时识别,更新输入框: ${result.recognizedWords}');
|
||||
_currentText = result.recognizedWords;
|
||||
|
||||
// 实时更新输入框内容 = 基础文本 + 当前识别文本
|
||||
String newText = _baseText;
|
||||
if (newText.isNotEmpty && !newText.endsWith(' ') && _currentText.isNotEmpty) {
|
||||
newText += ' '; // 添加空格分隔
|
||||
}
|
||||
newText += _currentText;
|
||||
|
||||
_textController.text = newText;
|
||||
// 将光标移到最后
|
||||
_textController.selection = TextSelection.fromPosition(
|
||||
TextPosition(offset: newText.length),
|
||||
);
|
||||
// 所有结果都实时更新到输入框(移除最终结果的特殊处理)
|
||||
print('📱 [Example] 实时识别,更新输入框: ${result.recognizedWords}');
|
||||
_currentText = result.recognizedWords;
|
||||
|
||||
// 实时更新输入框内容 = 基础文本 + 当前识别文本
|
||||
String newText = _baseText;
|
||||
if (newText.isNotEmpty &&
|
||||
!newText.endsWith(' ') &&
|
||||
_currentText.isNotEmpty) {
|
||||
newText += ' '; // 添加空格分隔
|
||||
}
|
||||
newText += _currentText;
|
||||
|
||||
_textController.text = newText;
|
||||
// 将光标移到最后
|
||||
_textController.selection = TextSelection.fromPosition(
|
||||
TextPosition(offset: newText.length),
|
||||
);
|
||||
}
|
||||
});
|
||||
},
|
||||
|
|
@ -513,7 +508,7 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
|
|||
});
|
||||
|
||||
if (!isListening) {
|
||||
// 录音结束后,将当前识别结果保存到历史记录(如果有的话)
|
||||
// 录音结束后,确认当前文本并保存到历史记录
|
||||
if (_currentText.isNotEmpty) {
|
||||
setState(() {
|
||||
_recognitionHistory.insert(0, _currentText);
|
||||
|
|
@ -523,7 +518,11 @@ class _SpeechRecognitionPageState extends State<SpeechRecognitionPage> {
|
|||
}
|
||||
});
|
||||
}
|
||||
// 注意:不在这里清空_currentText,因为最终结果会处理
|
||||
// 录音结束后,更新基础文本为下次录音做准备
|
||||
setState(() {
|
||||
_baseText = _textController.text; // 保存当前文本作为下次的基础
|
||||
_currentText = ''; // 清空当前识别文本
|
||||
});
|
||||
} else {
|
||||
// 开始录音时记录当前文本作为基础,清空当前识别文本
|
||||
setState(() {
|
||||
|
|
|
|||
|
|
@ -9,14 +9,10 @@ class SpeechRecognitionResult {
|
|||
/// 备选识别结果
|
||||
final List<String> alternatives;
|
||||
|
||||
/// 是否为最终结果(true: 最终结果, false: 实时结果)
|
||||
final bool finalResult;
|
||||
|
||||
const SpeechRecognitionResult({
|
||||
required this.recognizedWords,
|
||||
this.confidence = 0.0,
|
||||
this.alternatives = const [],
|
||||
this.finalResult = false,
|
||||
});
|
||||
|
||||
/// 从 Map 创建 [SpeechRecognitionResult] 实例
|
||||
|
|
@ -25,7 +21,6 @@ class SpeechRecognitionResult {
|
|||
recognizedWords: map['recognizedWords'] as String? ?? '',
|
||||
confidence: (map['confidence'] as num?)?.toDouble() ?? 0.0,
|
||||
alternatives: List<String>.from(map['alternatives'] as List? ?? []),
|
||||
finalResult: map['finalResult'] as bool? ?? false,
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -35,14 +30,13 @@ class SpeechRecognitionResult {
|
|||
'recognizedWords': recognizedWords,
|
||||
'confidence': confidence,
|
||||
'alternatives': alternatives,
|
||||
'finalResult': finalResult,
|
||||
};
|
||||
}
|
||||
|
||||
@override
|
||||
String toString() {
|
||||
return 'SpeechRecognitionResult(recognizedWords: $recognizedWords, '
|
||||
'confidence: $confidence, alternatives: $alternatives, finalResult: $finalResult)';
|
||||
'confidence: $confidence, alternatives: $alternatives)';
|
||||
}
|
||||
|
||||
@override
|
||||
|
|
@ -51,7 +45,6 @@ class SpeechRecognitionResult {
|
|||
return other is SpeechRecognitionResult &&
|
||||
other.recognizedWords == recognizedWords &&
|
||||
other.confidence == confidence &&
|
||||
other.finalResult == finalResult &&
|
||||
other.alternatives.length == alternatives.length &&
|
||||
other.alternatives.every((alt) => alternatives.contains(alt));
|
||||
}
|
||||
|
|
@ -60,7 +53,6 @@ class SpeechRecognitionResult {
|
|||
int get hashCode {
|
||||
return recognizedWords.hashCode ^
|
||||
confidence.hashCode ^
|
||||
finalResult.hashCode ^
|
||||
alternatives.hashCode;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -572,30 +572,8 @@ class YxAsrService implements SpeechRecognitionService {
|
|||
// 停止音频录制
|
||||
await _stopAudioRecording();
|
||||
|
||||
// 获取最终识别结果
|
||||
if (_recognizer != null && _stream != null) {
|
||||
try {
|
||||
// 暂时禁用离线识别,使用改进的流式识别
|
||||
debugPrint('🛑 [YxAsr] 获取流式识别最终结果...');
|
||||
final result = _recognizer!.getResult(_stream!);
|
||||
debugPrint('🛑 [YxAsr] 流式最终结果: "${result.text}"');
|
||||
|
||||
if (result.text.isNotEmpty) {
|
||||
debugPrint('📤 [YxAsr] 发送流式最终结果: ${result.text}');
|
||||
_sendResult(
|
||||
recognizedWords: result.text,
|
||||
confidence: 1.0,
|
||||
alternatives: [],
|
||||
finalResult: true, // 标记为最终结果
|
||||
);
|
||||
} else {
|
||||
debugPrint('⚠️ [YxAsr] 流式最终结果为空');
|
||||
}
|
||||
} catch (e) {
|
||||
debugPrint('⚠️ [YxAsr] 获取最终结果时出错: $e');
|
||||
}
|
||||
|
||||
// 重置流,准备下次识别
|
||||
// 重置流,准备下次识别
|
||||
if (_stream != null) {
|
||||
_stream = null;
|
||||
}
|
||||
|
||||
|
|
@ -786,14 +764,12 @@ class YxAsrService implements SpeechRecognitionService {
|
|||
required String recognizedWords,
|
||||
required double confidence,
|
||||
required List<String> alternatives,
|
||||
bool finalResult = false,
|
||||
}) {
|
||||
debugPrint('📤 [YxAsr] 发送识别结果: "$recognizedWords"');
|
||||
final result = SpeechRecognitionResult(
|
||||
recognizedWords: recognizedWords,
|
||||
confidence: confidence,
|
||||
alternatives: alternatives,
|
||||
finalResult: finalResult,
|
||||
);
|
||||
_resultController.add(result);
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue