Preparation:

  1. A signaling service
  2. A frontend page for the video call

Demo GitHub repository address.

Frontend page

To keep the demo as simple as possible, the page only offers two features: logging in, and calling the other party by their phone number. In real production use the identifier does not have to be a phone number; it can be any string that identifies a user.

The code is as follows:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>
<div style="margin: 20px">
    <label for="loginAccount">登录账号</label>
    <input id="loginAccount" name="loginAccount" placeholder="请输入手机号" type="text">
    <button id="login" onclick="login()" type="button">登录</button>
</div>
<div style="margin: 20px">
    <video autoplay controls height="360px" id="localVideo" width="640px"></video>
    <video autoplay controls height="360px" id="remoteVideo" width="640px"></video>
</div>
<div style="margin: 20px">
    <label for="toAccount">对方账号</label>
    <input id="toAccount" name="toAccount" placeholder="请输入对方手机号" type="text">
    <button id="requestVideo" type="button">请求视频通话</button>
</div>
<div style="margin: 20px">
    <fieldset>
        <button id="accept" type="button">接通</button>
        <button id="hangup" type="button">挂断</button>
    </fieldset>
</div>
<div style="margin: 20px">
    <fieldset>
        <div>
            录制格式: <select disabled id="codecPreferences"></select>
        </div>
        <button id="startRecord" onclick="startRecording()" type="button">开始录制视频</button>
        <button id="stopRecord" onclick="stopRecording()" type="button">停止录制视频</button>
        <button id="downloadRecord" onclick="download()" type="button">下载</button>
    </fieldset>
</div>
</body>
<script>
let config = {
    iceServers: [
        {
            'urls': 'turn:turn.wildfirechat.cn:3478',
            'credential': 'wfchat',
            'username': 'wfchat'
        }
    ]
};

const localVideo = document.getElementById('localVideo');
const remoteVideo = document.getElementById('remoteVideo');
const requestVideoButton = document.getElementById('requestVideo');
const acceptButton = document.getElementById('accept');
const hangupButton = document.getElementById('hangup');
const codecPreferences = document.querySelector('#codecPreferences');
const recordButton = document.getElementById('startRecord');
const stopRecordButton = document.getElementById('stopRecord');
const downloadButton = document.getElementById('downloadRecord');

const wsAddress = 'ws://localhost:9113/ws';

let loginAttemptCount = 0;
let myId, toId;
let pc, localStream, ws;
let mediaRecorder;
let recordedBlobs;

// Log in: open the WebSocket to the signaling service and handle signaling messages.
function login() {
    loginAttemptCount = 0;
    myId = document.getElementById('loginAccount').value;
    ws = new WebSocket(wsAddress);
    ws.onopen = function () {
        console.log("WebSocket is open now.");
        connect();
        alert("登录成功");
    };
    ws.onmessage = function (message) {
        let msg = JSON.parse(message.data);
        console.log("ws 收到消息:" + msg.type);
        switch (msg.type) {
            case "offline": {
                // The other side is not online yet; retry watch up to 10 times.
                if (loginAttemptCount < 10) {
                    setTimeout(() => {
                        loginAttemptCount++;
                        watch();
                    }, 1000);
                }
                break;
            }
            case "watch": {
                handleWatch(msg);
                break;
            }
            case "offer": {
                handleOffer(msg);
                break;
            }
            case "answer": {
                handleAnswer(msg);
                break;
            }
            case "candidate": {
                handleCandidate(msg);
                break;
            }
            case "hangup": {
                handleHangup(msg);
                break;
            }
        }
    };
}

// Caller: request a video call.
requestVideoButton.onclick = async () => {
    toId = document.getElementById('toAccount').value;
    if (!myId) {
        alert('请先登录');
        return;
    }
    if (!toId) {
        alert('请输入对方手机号');
        return;
    }
    watch();
    localStream = await navigator.mediaDevices.getUserMedia({audio: true, video: true});
    localVideo.srcObject = localStream;
    createPeerConnection();
};

function connect() {
    send({
        type: "connect",
        from: myId
    });
}

function handleWatch(msg) {
    toId = msg.from;
}

// Callee: answer, create an offer and send it to the caller.
acceptButton.onclick = async () => {
    localStream = await navigator.mediaDevices.getUserMedia({audio: true, video: true});
    localVideo.srcObject = localStream;
    createPeerConnection();
    pc.createOffer().then(offer => {
        pc.setLocalDescription(offer);
        send({
            type: 'offer',
            from: myId,
            to: toId,
            data: offer
        });
    });
};

// Caller: received the offer, reply with an answer.
function handleOffer(msg) {
    pc.setRemoteDescription(msg.data);
    pc.createAnswer().then(answer => {
        pc.setLocalDescription(answer);
        send({
            type: "answer",
            from: myId,
            to: toId,
            data: answer
        });
    });
}

function watch() {
    send({
        type: 'watch',
        from: myId,
        to: toId
    });
}

function handleAnswer(msg) {
    if (!pc) {
        console.error('no peer connection');
        return;
    }
    pc.setRemoteDescription(msg.data);
}

function handleCandidate(msg) {
    if (!pc) {
        console.error('no peer connection');
        return;
    }
    pc.addIceCandidate(new RTCIceCandidate(msg.data)).then(() => {
        console.log('candidate添加成功');
    }).catch(handleError);
}

function handleError(error) {
    console.log(error);
}

function createPeerConnection() {
    pc = new RTCPeerConnection(config);
    pc.onicecandidate = e => {
        if (e.candidate) {
            send({
                type: "candidate",
                from: myId,
                to: toId,
                data: e.candidate
            });
        }
    };
    pc.ontrack = e => remoteVideo.srcObject = e.streams[0];
    localStream.getTracks().forEach(track => pc.addTrack(track, localStream));
}

hangupButton.onclick = async () => {
    if (pc) {
        pc.close();
        pc = null;
    }
    if (localStream) {
        localStream.getTracks().forEach(track => track.stop());
        localStream = null;
    }
    send({
        type: "hangup",
        from: myId,
        to: toId
    });
};

function handleHangup() {
    if (!pc) {
        console.error('no peer connection');
        return;
    }
    pc.close();
    pc = null;
    if (localStream) {
        localStream.getTracks().forEach(track => track.stop());
        localStream = null;
    }
    console.log('hangup');
}

function send(msg) {
    ws.send(JSON.stringify(msg));
}

// Recording helpers: pick a supported container/codec, record the remote stream, download it.
function getSupportedMimeTypes() {
    const possibleTypes = [
        'video/webm;codecs=vp9,opus',
        'video/webm;codecs=vp8,opus',
        'video/webm;codecs=h264,opus',
        'video/mp4;codecs=h264,aac',
    ];
    return possibleTypes.filter(mimeType => {
        return MediaRecorder.isTypeSupported(mimeType);
    });
}

function startRecording() {
    recordedBlobs = [];
    getSupportedMimeTypes().forEach(mimeType => {
        const option = document.createElement('option');
        option.value = mimeType;
        option.innerText = option.value;
        codecPreferences.appendChild(option);
    });
    const mimeType = codecPreferences.options[codecPreferences.selectedIndex].value;
    const options = {mimeType};
    try {
        mediaRecorder = new MediaRecorder(remoteVideo.srcObject, options);
    } catch (e) {
        console.error('Exception while creating MediaRecorder:', e);
        alert('Exception while creating MediaRecorder: ' + e);
        return;
    }
    console.log('Created MediaRecorder', mediaRecorder, 'with options', options);
    recordButton.textContent = 'Stop Recording';
    mediaRecorder.onstop = (event) => {
        console.log('Recorder stopped: ', event);
        console.log('Recorded Blobs: ', recordedBlobs);
    };
    mediaRecorder.ondataavailable = handleDataAvailable;
    mediaRecorder.start();
    console.log('MediaRecorder started', mediaRecorder);
}

function handleDataAvailable(event) {
    console.log('handleDataAvailable', event);
    if (event.data && event.data.size > 0) {
        recordedBlobs.push(event.data);
    }
}

function stopRecording() {
    mediaRecorder.stop();
}

function download() {
    const blob = new Blob(recordedBlobs, {type: 'video/webm'});
    const url = window.URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.style.display = 'none';
    a.href = url;
    a.download = 'test.webm';
    document.body.appendChild(a);
    a.click();
    setTimeout(() => {
        document.body.removeChild(a);
        window.URL.revokeObjectURL(url);
    }, 100);
}
</script>
</html>

Signaling service

Built with JDK 1.8, Spring Boot and Netty, it mainly solves two problems:

  1. Identifying the participants, i.e. the person placing the video call and the person answering it
  2. Backing the buttons on the page, e.g. starting a video call, hanging up, and relaying the messages WebRTC needs to establish its communication channel

The core logic is as follows:

switch (event.getType()) {
    case "connect": {
        USER_MAP.put(event.getFrom(), ctx);
        break;
    }
    case "watch": {
        WebRtcEvent watchRequest = new WebRtcEvent();
        if (USER_MAP.containsKey(event.getTo())) {
            watchRequest.setType("watch");
            watchRequest.setFrom(event.getFrom());
            watchRequest.setTo(event.getTo());
            USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(watchRequest)));
        } else {
            watchRequest.setType("offline");
            USER_MAP.get(event.getFrom()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(watchRequest)));
        }
        break;
    }
    case "offer": {
        WebRtcEvent offerRequest = new WebRtcEvent();
        offerRequest.setType("offer");
        offerRequest.setFrom(event.getFrom());
        offerRequest.setTo(event.getTo());
        offerRequest.setData(event.getData());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(offerRequest)));
        break;
    }
    case "answer": {
        WebRtcEvent answerRequest = new WebRtcEvent();
        answerRequest.setType("answer");
        answerRequest.setFrom(event.getFrom());
        answerRequest.setData(event.getData());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(answerRequest)));
        break;
    }
    case "candidate": {
        WebRtcEvent candidateRequest = new WebRtcEvent();
        candidateRequest.setType("candidate");
        candidateRequest.setFrom(event.getFrom());
        candidateRequest.setData(event.getData());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(candidateRequest)));
        break;
    }
    case "hangup": {
        WebRtcEvent hangupRequest = new WebRtcEvent();
        hangupRequest.setType("hangup");
        hangupRequest.setFrom(event.getFrom());
        hangupRequest.setTo(event.getTo());
        USER_MAP.get(event.getTo()).writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(hangupRequest)));
        break;
    }
}
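
The switch above uses a WebRtcEvent POJO and a USER_MAP that the article does not show. Purely as a reference, here is a minimal sketch of what the event class might look like; the field names are inferred from the getters and setters used above and from the JSON the frontend sends, so treat this as an assumption rather than the original code:

// Hypothetical signaling message POJO; field names mirror the JSON used by the frontend.
public class WebRtcEvent {
    private String type;   // connect / watch / offer / answer / candidate / hangup / offline
    private String from;   // sender account (the phone number entered at login)
    private String to;     // receiver account
    private Object data;   // SDP offer/answer or ICE candidate payload

    public String getType() { return type; }
    public void setType(String type) { this.type = type; }
    public String getFrom() { return from; }
    public void setFrom(String from) { this.from = from; }
    public String getTo() { return to; }
    public void setTo(String to) { this.to = to; }
    public Object getData() { return data; }
    public void setData(Object data) { this.data = data; }
}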

connect -> Login

This corresponds to the 登录 (Login) button on the HTML page: after a phone number is entered and Login is clicked, the signaling service stores that phone number in a map for the later steps to use.

At least two clients must be logged in before a video call can take place.
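
The article also does not show where the switch lives. A minimal sketch of the surrounding Netty handler might look like the following; the class name SignalingHandler, the ConcurrentHashMap behind USER_MAP, and the use of fastjson's JSONObject.parseObject are assumptions, only the port 9113 and the /ws path come from the frontend's wsAddress:

import com.alibaba.fastjson.JSONObject;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical handler shell; the real project presumably registers it in the Netty
// pipeline behind HttpServerCodec / WebSocketServerProtocolHandler("/ws") on port 9113.
public class SignalingHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {

    // account (phone number) -> channel of the logged-in client
    static final Map<String, ChannelHandlerContext> USER_MAP = new ConcurrentHashMap<>();

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame frame) {
        WebRtcEvent event = JSONObject.parseObject(frame.text(), WebRtcEvent.class);
        // ... the switch (event.getType()) { ... } shown above goes here ...
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) {
        // Optional cleanup: drop the mapping when a client disconnects,
        // so the "watch" case can answer "offline" truthfully.
        USER_MAP.values().remove(ctx);
        ctx.fireChannelInactive();
    }
}

The connect case then simply stores ctx under the sender's account, which is exactly the "store the phone number in a map" step described above.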

watch -> Request a video call

After the 请求视频通话 (request video call) button is clicked, the frontend sends an event to the signaling service with the following structure:

{
    type: 'watch',      // event type
    from: 13789122381,  // my account, e.g. 13789122381
    to: 1323493929      // the other party's account, e.g. 1323493929
}

The account entered for the other party goes into the "to" field.

When the signaling server receives the watch event, it looks up the corresponding online client in the map and forwards the event to that client.
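
Every forwarding case in the switch repeats the same "look the target up in USER_MAP and writeAndFlush" step, and none of them guards against the target having gone offline. If you want to factor that out, a hypothetical helper inside the SignalingHandler sketched above could look like this (not part of the original demo):

// Hypothetical helper on the SignalingHandler sketch: forward an event to its "to"
// account if that client is online, otherwise reply "offline" to the sender.
private void forwardOrReportOffline(WebRtcEvent event) {
    ChannelHandlerContext target = USER_MAP.get(event.getTo());
    if (target != null) {
        target.writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(event)));
        return;
    }
    WebRtcEvent offline = new WebRtcEvent();
    offline.setType("offline");
    ChannelHandlerContext sender = USER_MAP.get(event.getFrom());
    if (sender != null) {
        sender.writeAndFlush(new TextWebSocketFrame(JSONObject.toJSONString(offline)));
    }
}

With such a helper, the offer, answer, candidate and hangup cases collapse to a single call, and a message addressed to a client that has already disconnected produces an offline reply instead of a NullPointerException.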

offer -> Answer

For the callee, clicking the 接通 (Answer) button is what makes WebRTC start building the communication channel.

The JSON sent when answering looks like this:

{
    type: 'offer',
    from: myId,
    to: toId,
    data: offer   // the SDP offer produced by createOffer()
}

The overall call flow is: both clients log in (connect) -> the caller sends watch -> the callee clicks 接通 and sends an offer -> the caller replies with an answer -> both sides exchange ICE candidates -> audio and video then flow between the browsers, directly or via the TURN relay.

Summary

The HTML page also needs the address of a coturn TURN server. The address used in this demo is a test server, so please do not use it in production.
