-import React, { useState, useRef, useEffect } from "react";
-import "../css/Avatar.css";
-import * as SpeechSDK from "microsoft-cognitiveservices-speech-sdk";
-import { createAvatarSynthesizer, createWebRTCConnection } from "./Utility";
-import { avatarAppConfig } from "./config";
-import Carousel from "../Carousel";
-import DescriptionSpot from "../DescriptionSpot";
-import TextToSpeech from "../TextToSpeech";
-
-interface AvatarProps {
-  conferenceText: string;
-  images : string[];
-}
-
-const Avatar = ({ conferenceText, images }: AvatarProps) => {
-  const [avatarSynthesizer, setAvatarSynthesizer] = useState<any>(null);
-  const myAvatarVideoRef = useRef<HTMLDivElement>(null);
-  const myAvatarVideoEleRef = useRef<HTMLVideoElement>(null);
-  const myAvatarAudioEleRef = useRef<HTMLAudioElement>(null);
-  const [description, setDescription] = useState<string>("");
-  const [isAvatarStarted, setIsAvatarStarted] = useState<boolean>(false);
-  const iceUrl = avatarAppConfig.iceUrl;
-  const iceUsername = avatarAppConfig.iceUsername;
-  const iceCredential = avatarAppConfig.iceCredential;
-
-  const handleOnTrack = (event: any) => {
-    console.log("#### Printing handle onTrack ", event);
-    console.log("Printing event.track.kind ", event.track.kind);
-    if (event.track.kind === "video") {
-      const mediaPlayer = myAvatarVideoEleRef.current;
-      mediaPlayer!.id = event.track.kind;
-      mediaPlayer!.srcObject = event.streams[0];
-      mediaPlayer!.autoplay = true;
-      mediaPlayer!.playsInline = true;
-      mediaPlayer!.addEventListener("play", () => {
-        window.requestAnimationFrame(() => {});
-      });
-    } else {
-      const audioPlayer = myAvatarAudioEleRef.current;
-      audioPlayer!.srcObject = event.streams[0];
-      audioPlayer!.autoplay = true;
-      audioPlayer!.muted = true;
-    }
-  };
-
-  const stopSession = () => {
-    try {
-      avatarSynthesizer.stopSpeakingAsync().then(() => {
-        console.log("[" + new Date().toISOString() + "] Stop speaking request sent.");
-        avatarSynthesizer.close();
-      }).catch((error: any) => {
-        console.error(error);
-      });
-    } catch (e) {
-      console.error(e);
-    }
-  };
-
-  const speakSelectedText = (text : string) => {
-    if (!avatarSynthesizer) {
-      console.error("Avatar synthesizer is not initialized.");
-      return;
-    }
-
-    const audioPlayer = myAvatarAudioEleRef.current;
-    audioPlayer!.muted = false;
-
-    avatarSynthesizer.speakTextAsync(text).then((result: any) => {
-      if (result.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
-        console.log("Speech and avatar synthesized to video stream.");
-        setIsAvatarStarted(true);
-      } else {
-        console.log("Unable to speak. Result ID: " + result.resultId);
-        if (result.reason === SpeechSDK.ResultReason.Canceled) {
-          let cancellationDetails = SpeechSDK.CancellationDetails.fromResult(result);
-          console.log(cancellationDetails.reason);
-          if (cancellationDetails.reason === SpeechSDK.CancellationReason.Error) {
-            console.log(cancellationDetails.errorDetails);
-          }
-        }
-      }
-    }).catch((error: any) => {
-      console.error(error);
-      if (avatarSynthesizer) {
-        avatarSynthesizer.close();
-      }
-    });
-  };
-
-
-  const startSession = async () => {
-    let peerConnection = createWebRTCConnection(
-      iceUrl,
-      iceUsername,
-      iceCredential
-    );
-    peerConnection.ontrack = handleOnTrack;
-    peerConnection.addTransceiver('video', { direction: 'sendrecv' });
-    peerConnection.addTransceiver('audio', { direction: 'sendrecv' });
-
-    let avatarSynthesizer = createAvatarSynthesizer();
-    setAvatarSynthesizer(avatarSynthesizer);
-    avatarSynthesizer
-      .startAvatarAsync(peerConnection)
-      .then((r) => {
-        if (r.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
-          console.log("Speech and avatar synthesized to video stream.");
-        }
-        console.log('[' + new Date().toISOString() + '] Avatar started.');
-        setIsAvatarStarted(true);
-      })
-      .catch((error) => {
-        console.log(
-          '[' +
-            new Date().toISOString() +
-            '] Avatar failed to start. Error: ' +
-            error
-        );
-      });
-  };
-
-
-
-  useEffect(() => {
-    startSession();
-  }, []);
-
-
-  useEffect(() => {
-    setTimeout(setDescription, 1000, conferenceText);
-  }, [isAvatarStarted]);
-
-
-  useEffect(() => {
-    if (description) {
-      speakSelectedText(conferenceText);
-    }
-  }, [description]);
-
-  return (
-    <div className="container myAvatarContainer flex-row">
-      <div className="container myAvatarVideoRootDiv d-flex justify-content-between">
+// import { useState, useRef, useEffect } from "react";
+// import "../css/Avatar.css";
+// import * as SpeechSDK from "microsoft-cognitiveservices-speech-sdk";
+// import { createAvatarSynthesizer, createWebRTCConnection } from "./Utility";
+// import { avatarAppConfig } from "./config";
+// import Carousel from "../Carousel";
+// import DescriptionSpot from "../DescriptionSpot";
+// import TextToSpeech from "../TextToSpeech";
+
+// interface AvatarProps {
+//   conferenceText: string;
+//   images : string[];
+// }
+
+// const Avatar = ({ conferenceText, images }: AvatarProps) => {
+//   const [avatarSynthesizer, setAvatarSynthesizer] = useState<any>(null);
+//   const myAvatarVideoRef = useRef<HTMLDivElement>(null);
+//   const myAvatarVideoEleRef = useRef<HTMLVideoElement>(null);
+//   const myAvatarAudioEleRef = useRef<HTMLAudioElement>(null);
+//   const [description, setDescription] = useState<string>("");
+//   const [isAvatarStarted, setIsAvatarStarted] = useState<boolean>(false);
+//   const iceUrl = avatarAppConfig.iceUrl;
+//   const iceUsername = avatarAppConfig.iceUsername;
+//   const iceCredential = avatarAppConfig.iceCredential;
+
+//   const handleOnTrack = (event: any) => {
+//     console.log("#### Printing handle onTrack ", event);
+//     console.log("Printing event.track.kind ", event.track.kind);
+//     if (event.track.kind === "video") {
+//       const mediaPlayer = myAvatarVideoEleRef.current;
+//       mediaPlayer!.id = event.track.kind;
+//       mediaPlayer!.srcObject = event.streams[0];
+//       mediaPlayer!.autoplay = true;
+//       mediaPlayer!.playsInline = true;
+//       mediaPlayer!.addEventListener("play", () => {
+//         window.requestAnimationFrame(() => {});
+//       });
+//     } else {
+//       const audioPlayer = myAvatarAudioEleRef.current;
+//       audioPlayer!.srcObject = event.streams[0];
+//       audioPlayer!.autoplay = true;
+//       audioPlayer!.muted = true;
+//     }
+//   };
+
+//   const stopSession = () => {
+//     try {
+//       avatarSynthesizer.stopSpeakingAsync().then(() => {
+//         console.log("[" + new Date().toISOString() + "] Stop speaking request sent.");
+//         avatarSynthesizer.close();
+//       }).catch((error: any) => {
+//         console.error(error);
+//       });
+//     } catch (e) {
+//       console.error(e);
+//     }
+//   };
+
+//   const speakSelectedText = (text : string) => {
+//     if (!avatarSynthesizer) {
+//       console.error("Avatar synthesizer is not initialized.");
+//       return;
+//     }
+
+//     const audioPlayer = myAvatarAudioEleRef.current;
+//     audioPlayer!.muted = false;
+
+//     avatarSynthesizer.speakTextAsync(text).then((result: any) => {
+//       if (result.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
+//         console.log("Speech and avatar synthesized to video stream.");
+//         setIsAvatarStarted(true);
+//       } else {
+//         console.log("Unable to speak. Result ID: " + result.resultId);
+//         if (result.reason === SpeechSDK.ResultReason.Canceled) {
+//           let cancellationDetails = SpeechSDK.CancellationDetails.fromResult(result);
+//           console.log(cancellationDetails.reason);
+//           if (cancellationDetails.reason === SpeechSDK.CancellationReason.Error) {
+//             console.log(cancellationDetails.errorDetails);
+//           }
+//         }
+//       }
+//     }).catch((error: any) => {
+//       console.error(error);
+//       if (avatarSynthesizer) {
+//         avatarSynthesizer.close();
+//       }
+//     });
+//   };
+
+
+//   const startSession = async () => {
+//     let peerConnection = createWebRTCConnection(
+//       iceUrl,
+//       iceUsername,
+//       iceCredential
+//     );
+//     peerConnection.ontrack = handleOnTrack;
+//     peerConnection.addTransceiver('video', { direction: 'sendrecv' });
+//     peerConnection.addTransceiver('audio', { direction: 'sendrecv' });
+
+//     let avatarSynthesizer = createAvatarSynthesizer();
+//     setAvatarSynthesizer(avatarSynthesizer);
+//     avatarSynthesizer
+//       .startAvatarAsync(peerConnection)
+//       .then((r) => {
+//         if (r.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
+//           console.log("Speech and avatar synthesized to video stream.");
+//         }
+//         console.log('[' + new Date().toISOString() + '] Avatar started.');
+//         setIsAvatarStarted(true);
+//       })
+//       .catch((error) => {
+//         console.log(
+//           '[' +
+//             new Date().toISOString() +
+//             '] Avatar failed to start. Error: ' +
+//             error
+//         );
+//       });
+//   };
+
+
+
+//   useEffect(() => {
+//     startSession();
+//   }, []);
+
+
+//   useEffect(() => {
+//     setTimeout(setDescription, 1000, conferenceText);
+//   }, [isAvatarStarted]);
+
+
+//   useEffect(() => {
+//     if (description) {
+//       speakSelectedText(conferenceText);
+//     }
+//   }, [description]);
+
+//   return (
+//     <div className="container myAvatarContainer flex-row">
+//       <div className="container myAvatarVideoRootDiv d-flex justify-content-between">
 
-        <Carousel imagesUrls={images} />
+//         <Carousel imagesUrls={images} />
 
-        <TextToSpeech content={conferenceText} />
+//         <TextToSpeech content={conferenceText} />
 
-        <DescriptionSpot description={description} />
+//         <DescriptionSpot description={description} />
 
-      </div>
-    </div>
-  );
+//       </div>
+//     </div>
+//   );
 
-};
+// };
 
-export default Avatar;
+// export default Avatar;
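
Note: the component imports createWebRTCConnection and createAvatarSynthesizer from ./Utility, a file not included in this diff. Below is a minimal sketch of what those helpers might look like, assuming the public microsoft-cognitiveservices-speech-sdk avatar API; the cogSvcSubKey and cogSvcRegion config fields and the "lisa" / "casual-sitting" avatar choices are hypothetical, since only the ice* fields of avatarAppConfig appear above.

// Hypothetical reconstruction of ./Utility -- not part of this commit.
import * as SpeechSDK from "microsoft-cognitiveservices-speech-sdk";
import { avatarAppConfig } from "./config";

// Relay the avatar's audio/video through the TURN server issued by the
// avatar service, using the ICE credentials stored in the app config.
export const createWebRTCConnection = (
  iceUrl: string,
  iceUsername: string,
  iceCredential: string
): RTCPeerConnection =>
  new RTCPeerConnection({
    iceServers: [
      { urls: [iceUrl], username: iceUsername, credential: iceCredential },
    ],
  });

// Build an AvatarSynthesizer from the Speech resource plus an avatar
// character/style. Field names and avatar choices here are assumptions.
export const createAvatarSynthesizer = (): SpeechSDK.AvatarSynthesizer => {
  const speechConfig = SpeechSDK.SpeechConfig.fromSubscription(
    avatarAppConfig.cogSvcSubKey,   // assumed config field
    avatarAppConfig.cogSvcRegion    // assumed config field
  );
  const videoFormat = new SpeechSDK.AvatarVideoFormat();
  const avatarConfig = new SpeechSDK.AvatarConfig(
    "lisa",           // assumed avatar character
    "casual-sitting", // assumed avatar style
    videoFormat
  );
  return new SpeechSDK.AvatarSynthesizer(speechConfig, avatarConfig);
};

Under these assumptions, startAvatarAsync(peerConnection) negotiates the WebRTC session over the returned connection, which is why the component assigns handleOnTrack before calling it: the video and audio tracks arrive through that handler once negotiation completes.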