
Commit 8386513

Merge pull request #50 from micbelgique/cleanTest
Clean test

2 parents: 15928be + e97c0b9

File tree

7 files changed: +200 -245 lines

.github/workflows/azure-static-web-apps-black-rock-04b9de903.yml (+1 -5)

@@ -3,11 +3,7 @@ name: Azure Static Web Apps CI/CD
 on:
   push:
     branches:
-      - main
-  pull_request:
-    types: [opened, synchronize, reopened, closed]
-    branches:
-      - main
+      -cleanTest

 jobs:
   build_and_deploy_job:
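After this change the workflow runs only on pushes to the cleanTest branch; the pull_request trigger is removed entirely. Reconstructed from the diff (indentation inferred), the trigger section becomes:

    on:
      push:
        branches:
          -cleanTest

Note that "-cleanTest", as captured, has no space after the dash: YAML parses that as a plain scalar rather than a one-entry list, so "- cleanTest" was presumably intended.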

src/conference-ia/.eslintrc.cjs (+1 -4)

@@ -10,9 +10,6 @@ module.exports = {
   parser: '@typescript-eslint/parser',
   plugins: ['react-refresh'],
   rules: {
-    'react-refresh/only-export-components': [
-      'warn',
-      { allowConstantExport: true },
-    ],
+    'no-unused-vars': 'warn',
   },
 }
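The react-refresh rule, which warned when a module exported anything besides components, is dropped, and the core no-unused-vars rule is set to warn instead, presumably so the values this commit stops updating produce warnings rather than errors. For TypeScript sources a common variant (a hypothetical alternative, not what this commit does) is to turn the core rule off in favor of the TypeScript-aware one, which understands type-only constructs:

    // .eslintrc.cjs — hypothetical variant, not part of this commit
    rules: {
      // let @typescript-eslint handle unused detection for .ts/.tsx files
      'no-unused-vars': 'off',
      '@typescript-eslint/no-unused-vars': 'warn',
    },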

src/conference-ia/src/Components/Carousel.tsx (+1 -1)

@@ -1,4 +1,4 @@
-import React, { useState, useEffect } from 'react';
+import { useState, useEffect } from 'react';

 function Carousel({ imagesUrls }: { imagesUrls: string[] }) {
   const [currentImageIndex, setCurrentImageIndex] = useState<number>(0);
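Dropping the default React import is safe under the automatic JSX runtime (React 17+, "jsx": "react-jsx" in tsconfig.json), where JSX no longer compiles to React.createElement calls; the named hook imports are all the file still needs. A minimal sketch of the same pattern, assuming that configuration and using a hypothetical component rather than code from this repo:

    // Counter.tsx — compiles without a default React import under the
    // automatic JSX runtime ("jsx": "react-jsx")
    import { useState } from 'react';

    export function Counter() {
      const [count, setCount] = useState(0);
      return <button onClick={() => setCount(count + 1)}>Clicked {count}</button>;
    }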

src/conference-ia/src/Components/CreateConference.tsx (+3 -33)

@@ -3,40 +3,10 @@ import { Link } from 'react-router-dom';

 const CreateConference: React.FC = () => {
   const [title, setTitle] = useState('');
-  const [isLoading, setIsLoading] = useState(false);
-  const [message, setMessage] = useState('');
+  const [isLoading] = useState(false);
+  const [message] = useState('');

-  const handleSubmit = async (event: React.FormEvent) => {
-    event.preventDefault();
-
-    if (!title || !title.trim()) {
-      setMessage('Vous devez écrire un contexte.');
-      return;
-    }
-
-    setIsLoading(true);
-    setMessage('Création en cours...');
-
-    try {
-      const response = await fetch('https://api-generateconference.azurewebsites.net/Conference/CreateConference', {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json'
-        },
-        body: JSON.stringify({ Prompt: title })
-      });
-
-      if (response.ok) {
-        setMessage('La conférence a été créée.');
-      } else {
-        setMessage('La création de la conférence a échoué.');
-      }
-    } catch (error) {
-      setMessage('Une erreur est survenue lors de la création de la conférence.');
-    } finally {
-      setIsLoading(false);
-    }
-  };
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars

   return (
     <div className="flex flex-col items-center justify-center min-h-screen bg-gray-100 space-y-10">
Avatar component (file path not captured in this view)
@@ -1,156 +1,156 @@
-import React, { useState, useRef, useEffect } from "react";
-import "../css/Avatar.css";
-import * as SpeechSDK from "microsoft-cognitiveservices-speech-sdk";
-import { createAvatarSynthesizer, createWebRTCConnection } from "./Utility";
-import { avatarAppConfig } from "./config";
-import Carousel from "../Carousel";
-import DescriptionSpot from "../DescriptionSpot";
-import TextToSpeech from "../TextToSpeech";
-
-interface AvatarProps {
-  conferenceText: string;
-  images : string[];
-}
-
-const Avatar = ({ conferenceText, images }: AvatarProps) => {
-  const [avatarSynthesizer, setAvatarSynthesizer] = useState<any>(null);
-  const myAvatarVideoRef = useRef<HTMLDivElement>(null);
-  const myAvatarVideoEleRef = useRef<HTMLVideoElement>(null);
-  const myAvatarAudioEleRef = useRef<HTMLAudioElement>(null);
-  const [description, setDescription] = useState<string>("");
-  const [isAvatarStarted, setIsAvatarStarted] = useState<boolean>(false);
-  const iceUrl = avatarAppConfig.iceUrl;
-  const iceUsername = avatarAppConfig.iceUsername;
-  const iceCredential = avatarAppConfig.iceCredential;
-
-  const handleOnTrack = (event: any) => {
-    console.log("#### Printing handle onTrack ", event);
-    console.log("Printing event.track.kind ", event.track.kind);
-    if (event.track.kind === "video") {
-      const mediaPlayer = myAvatarVideoEleRef.current;
-      mediaPlayer!.id = event.track.kind;
-      mediaPlayer!.srcObject = event.streams[0];
-      mediaPlayer!.autoplay = true;
-      mediaPlayer!.playsInline = true;
-      mediaPlayer!.addEventListener("play", () => {
-        window.requestAnimationFrame(() => {});
-      });
-    } else {
-      const audioPlayer = myAvatarAudioEleRef.current;
-      audioPlayer!.srcObject = event.streams[0];
-      audioPlayer!.autoplay = true;
-      audioPlayer!.muted = true;
-    }
-  };
-
-  const stopSession = () => {
-    try {
-      avatarSynthesizer.stopSpeakingAsync().then(() => {
-        console.log("[" + new Date().toISOString() + "] Stop speaking request sent.");
-        avatarSynthesizer.close();
-      }).catch((error: any) => {
-        console.error(error);
-      });
-    } catch (e) {
-      console.error(e);
-    }
-  };
-
-  const speakSelectedText = (text : string) => {
-    if (!avatarSynthesizer) {
-      console.error("Avatar synthesizer is not initialized.");
-      return;
-    }
-
-    const audioPlayer = myAvatarAudioEleRef.current;
-    audioPlayer!.muted = false;
-
-    avatarSynthesizer.speakTextAsync(text).then((result: any) => {
-      if (result.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
-        console.log("Speech and avatar synthesized to video stream.");
-        setIsAvatarStarted(true);
-      } else {
-        console.log("Unable to speak. Result ID: " + result.resultId);
-        if (result.reason === SpeechSDK.ResultReason.Canceled) {
-          let cancellationDetails = SpeechSDK.CancellationDetails.fromResult(result);
-          console.log(cancellationDetails.reason);
-          if (cancellationDetails.reason === SpeechSDK.CancellationReason.Error) {
-            console.log(cancellationDetails.errorDetails);
-          }
-        }
-      }
-    }).catch((error: any) => {
-      console.error(error);
-      if (avatarSynthesizer) {
-        avatarSynthesizer.close();
-      }
-    });
-  };
-
-
-  const startSession = async () => {
-    let peerConnection = createWebRTCConnection(
-      iceUrl,
-      iceUsername,
-      iceCredential
-    );
-    peerConnection.ontrack = handleOnTrack;
-    peerConnection.addTransceiver('video', { direction: 'sendrecv' });
-    peerConnection.addTransceiver('audio', { direction: 'sendrecv' });
-
-    let avatarSynthesizer = createAvatarSynthesizer();
-    setAvatarSynthesizer(avatarSynthesizer);
-    avatarSynthesizer
-      .startAvatarAsync(peerConnection)
-      .then((r) => {
-        if (r.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
-          console.log("Speech and avatar synthesized to video stream.");
-        }
-        console.log('[' + new Date().toISOString() + '] Avatar started.');
-        setIsAvatarStarted(true);
-      })
-      .catch((error) => {
-        console.log(
-          '[' +
-          new Date().toISOString() +
-          '] Avatar failed to start. Error: ' +
-          error
-        );
-      });
-  };
-
-
-
-  useEffect(() => {
-    startSession();
-  }, []);
-
-
-  useEffect(() => {
-    setTimeout(setDescription, 1000,conferenceText);
-  }, [isAvatarStarted]);
-
-
-  useEffect(() => {
-    if (description) {
-      speakSelectedText(conferenceText);
-    }
-  }, [description]);
-
-  return (
-    <div className="container myAvatarContainer flex-row">
-      <div className="container myAvatarVideoRootDiv d-flex justify-content-between">
+// import { useState, useRef, useEffect } from "react";
+// import "../css/Avatar.css";
+// import * as SpeechSDK from "microsoft-cognitiveservices-speech-sdk";
+// import { createAvatarSynthesizer, createWebRTCConnection } from "./Utility";
+// import { avatarAppConfig } from "./config";
+// import Carousel from "../Carousel";
+// import DescriptionSpot from "../DescriptionSpot";
+// import TextToSpeech from "../TextToSpeech";
+
+// interface AvatarProps {
+//   conferenceText: string;
+//   images : string[];
+// }
+
+// const Avatar = ({ conferenceText, images }: AvatarProps) => {
+//   const [avatarSynthesizer, setAvatarSynthesizer] = useState<any>(null);
+//   const myAvatarVideoRef = useRef<HTMLDivElement>(null);
+//   const myAvatarVideoEleRef = useRef<HTMLVideoElement>(null);
+//   const myAvatarAudioEleRef = useRef<HTMLAudioElement>(null);
+//   const [description, setDescription] = useState<string>("");
+//   const [isAvatarStarted, setIsAvatarStarted] = useState<boolean>(false);
+//   const iceUrl = avatarAppConfig.iceUrl;
+//   const iceUsername = avatarAppConfig.iceUsername;
+//   const iceCredential = avatarAppConfig.iceCredential;
+
+//   const handleOnTrack = (event: any) => {
+//     console.log("#### Printing handle onTrack ", event);
+//     console.log("Printing event.track.kind ", event.track.kind);
+//     if (event.track.kind === "video") {
+//       const mediaPlayer = myAvatarVideoEleRef.current;
+//       mediaPlayer!.id = event.track.kind;
+//       mediaPlayer!.srcObject = event.streams[0];
+//       mediaPlayer!.autoplay = true;
+//       mediaPlayer!.playsInline = true;
+//       mediaPlayer!.addEventListener("play", () => {
+//         window.requestAnimationFrame(() => {});
+//       });
+//     } else {
+//       const audioPlayer = myAvatarAudioEleRef.current;
+//       audioPlayer!.srcObject = event.streams[0];
+//       audioPlayer!.autoplay = true;
+//       audioPlayer!.muted = true;
+//     }
+//   };
+
+//   const stopSession = () => {
+//     try {
+//       avatarSynthesizer.stopSpeakingAsync().then(() => {
+//         console.log("[" + new Date().toISOString() + "] Stop speaking request sent.");
+//         avatarSynthesizer.close();
+//       }).catch((error: any) => {
+//         console.error(error);
+//       });
+//     } catch (e) {
+//       console.error(e);
+//     }
+//   };
+
+//   const speakSelectedText = (text : string) => {
+//     if (!avatarSynthesizer) {
+//       console.error("Avatar synthesizer is not initialized.");
+//       return;
+//     }
+
+//     const audioPlayer = myAvatarAudioEleRef.current;
+//     audioPlayer!.muted = false;
+
+//     avatarSynthesizer.speakTextAsync(text).then((result: any) => {
+//       if (result.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
+//         console.log("Speech and avatar synthesized to video stream.");
+//         setIsAvatarStarted(true);
+//       } else {
+//         console.log("Unable to speak. Result ID: " + result.resultId);
+//         if (result.reason === SpeechSDK.ResultReason.Canceled) {
+//           let cancellationDetails = SpeechSDK.CancellationDetails.fromResult(result);
+//           console.log(cancellationDetails.reason);
+//           if (cancellationDetails.reason === SpeechSDK.CancellationReason.Error) {
+//             console.log(cancellationDetails.errorDetails);
+//           }
+//         }
+//       }
+//     }).catch((error: any) => {
+//       console.error(error);
+//       if (avatarSynthesizer) {
+//         avatarSynthesizer.close();
+//       }
+//     });
+//   };
+
+
+//   const startSession = async () => {
+//     let peerConnection = createWebRTCConnection(
+//       iceUrl,
+//       iceUsername,
+//       iceCredential
+//     );
+//     peerConnection.ontrack = handleOnTrack;
+//     peerConnection.addTransceiver('video', { direction: 'sendrecv' });
+//     peerConnection.addTransceiver('audio', { direction: 'sendrecv' });
+
+//     let avatarSynthesizer = createAvatarSynthesizer();
+//     setAvatarSynthesizer(avatarSynthesizer);
+//     avatarSynthesizer
+//       .startAvatarAsync(peerConnection)
+//       .then((r) => {
+//         if (r.reason === SpeechSDK.ResultReason.SynthesizingAudioCompleted) {
+//           console.log("Speech and avatar synthesized to video stream.");
+//         }
+//         console.log('[' + new Date().toISOString() + '] Avatar started.');
+//         setIsAvatarStarted(true);
+//       })
+//       .catch((error) => {
+//         console.log(
+//           '[' +
+//           new Date().toISOString() +
+//           '] Avatar failed to start. Error: ' +
+//           error
+//         );
+//       });
+//   };
+
+
+
+//   useEffect(() => {
+//     startSession();
+//   }, []);
+
+
+//   useEffect(() => {
+//     setTimeout(setDescription, 1000,conferenceText);
+//   }, [isAvatarStarted]);
+
+
+//   useEffect(() => {
+//     if (description) {
+//       speakSelectedText(conferenceText);
+//     }
+//   }, [description]);
+
+//   return (
+//     <div className="container myAvatarContainer flex-row">
+//       <div className="container myAvatarVideoRootDiv d-flex justify-content-between">

-        <Carousel imagesUrls={images} />
+//         <Carousel imagesUrls={images} />

-        <TextToSpeech content={conferenceText} />
+//         <TextToSpeech content={conferenceText} />

-        <DescriptionSpot description={description} />
+//         <DescriptionSpot description={description} />

-      </div>
-    </div>
-  );
+//       </div>
+//     </div>
+//   );

-};
+// };

-export default Avatar;
+// export default Avatar;
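One detail in the now-commented effect chain: setTimeout(setDescription, 1000, conferenceText) relies on setTimeout's extra-argument form, which calls setDescription(conferenceText) after one second; the resulting description change is what triggers speakSelectedText via the third effect. A more explicit equivalent with cleanup (a sketch, not code from this commit):

    useEffect(() => {
      // setTimeout(fn, delay, arg) invokes fn(arg) after delay ms
      const id = setTimeout(() => setDescription(conferenceText), 1000);
      return () => clearTimeout(id); // avoid a late state update after unmount
    }, [isAvatarStarted]);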
