Advanced features build on the Quick Start and on the basic scene and avatar rendering described earlier; they will not take effect without those in place.
The editing features in this section are implemented by FUAvatarEditKit; see the API documentation for the related interfaces.

1. Obtain the mutually exclusive / collaborative prop list via modify
/**
 * Get the list of mutually exclusive / collaborative props for an avatar object
 * @param avatar the Avatar to check
 * @param fileIdList the list of fileIds to check
 * @param metaList the list of meta information for the props to check
 * @param block callback that receives the parsed modify result
 * @throws ModifyThrowable the algorithm library returned an abnormal result
 */
fun getComponentModifyResult(
    avatar: Avatar,
    fileIdList: List<String>,
    metaList: List<ByteArray>,
    block: (Result<ModifyResult>) -> Unit
) {
    avatar.getComponentModifyResult(
        FUInstanceBundleOPEnum.ADD,
        fileIdList,
        metaList,
        object : OnAvatarModifyListener {
            override fun onCompleted(result: FUAvatarComponentModifyResult?) {
                block(parseModifyResult(avatar, result))
            }
        })
}
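A minimal call site might look like the following sketch; the callback bodies are placeholders, and fileIdList/metaList stand for the props the user selected in the editor.

// Sketch: invoking the helper above; the handling inside the callback is illustrative only.
getComponentModifyResult(avatar, fileIdList, metaList) { result ->
    result.onSuccess { modifyResult ->
        // proceed with steps 2 and 3 below using the add/remove lists
    }.onFailure { throwable ->
        // e.g. a ModifyThrowable when the algorithm library returns an abnormal result
    }
}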
data class FUAvatarComponentModifyResult @JvmOverloads internal constructor(val isSuccess: Boolean = false) {
    /**
     * fileIds of the data that needs to be added
     */
    var needAddPaths: ArrayList<String> = ArrayList()

    /**
     * fileIds of the data that needs to be removed
     */
    var needRemovePaths: ArrayList<String> = ArrayList()
    ......
}
2. Download the bundle props listed in needAddPaths of the modify result that do not yet exist locally
......
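As an illustration only, the local-existence check and download might look like the sketch below; localPathOf and downloadBundle are hypothetical app-side helpers standing in for your own resource-mapping and download layer, not SDK APIs.

// Sketch only (import java.io.File); localPathOf() and downloadBundle() are hypothetical.
modifyResult.needAddFileIdList
    .filter { fileID -> !File(localPathOf(fileID)).exists() } // skip bundles already on disk
    .forEach { fileID -> downloadBundle(fileID) }             // download the missing ones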
3. Bind or unbind props on the Avatar according to the modify result
val willAddBundleDataList = mutableListOf<FUBundleData>()
val willRemoveBundleDataList = mutableListOf<FUBundleData>()
modifyResult.needAddFileIdList.forEach { fileID ->
    // sourceId/resourceType: resolved by the app's own resource mapping for this fileID
    FUResourceHelper.getSourceBean(sourceId, resourceType)?.path?.let {
        val newBundle = FUBundleData(it, fileId = fileID)
        willAddBundleDataList.add(newBundle)
    }
}
modifyResult.needRemoveFileIdList.forEach { fileID ->
    avatar.getComponentByFileId(fileID)?.let {
        willRemoveBundleDataList.add(it)
    }
}
FUAvatarEditKit.getInstance().updateComponents(willAddBundleDataList, willRemoveBundleDataList, false)

/**
 * @param key the color key of the prop; see edit_color_list.json
 */
FUAvatarEditKit.getInstance().setColor(key, FUColorRGBData(255, 255, 255, 255))
/**
 * @param key the dimension key for bones and face shaping; see MeshPoints.json in the demo
 * @param value adjustment range 0~1
 */
FUAvatarEditKit.getInstance().setDeformation(key, value)
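For example, with placeholder keys (the key names below are hypothetical; look up the real ones in edit_color_list.json and the demo's MeshPoints.json):

// Sketch with hypothetical keys and values.
FUAvatarEditKit.getInstance().setColor("hair_color", FUColorRGBData(200, 120, 80, 255))
FUAvatarEditKit.getInstance().setDeformation("head_nose_size", 0.5f) // value in the 0~1 range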
/**
 * Initialize AR; call this in onSurfaceCreate
 */
fun initAIKit() {
    FUAIKit.getInstance().loadAIProcessor(DemoConstants.faceProcessBundlePath, FUAITypeEnum.FACE) // load the AI face processor
    FUAIKit.getInstance().setFaceProcessorCaptureType(FUAIFaceCaptureTypeEnum.CAPTURE_WITH_TONGUE) // set the face capture type
}

/**
 * Open AR
 */
private fun openAR() {
    enableFaceTrack(true) // enable face tracking
    scene?.rendererConfig?.setEnableFakeShadow(true) // enable the Scene FakeShadow switch
    scene?.rendererConfig?.setEnableRenderInputData(true) // enable rendering of the input data
    scene?.rendererConfig?.setPostProcessMirrorParam(null) // disable the Scene Avatar mirror post-process (ring reflection)
    scene?.processorConfig?.setEnableARModel(true) // enable AR mode
    // Disable the animation feature
    avatar?.animationGraph?.setAnimationGraphParam("AllBoneMaskActive", true)
}

/**
 * Enable or disable face tracking (face detection and face following)
 * @param open true: enable face tracking; false: disable face tracking
 */
private fun enableFaceTrack(open: Boolean) {
    if (open) {
        FUAIKit.getInstance().setFaceProcessorCaptureType(FUAIFaceCaptureTypeEnum.CAPTURE_WITH_TONGUE)
        FUAIKit.getInstance().setFaceProcessorEnable(true) // enable face tracking
        avatar?.processorConfig?.setFaceProcessorHeadRotationZRange(-70f, 70f) // set the head rotation range
        avatar?.processorConfig?.setFaceProcessorType(FUAIFaceProcessorTypeEnum.INNER) // set the face detection data source
    } else {
        FUAIKit.getInstance().setFaceProcessorCaptureType(FUAIFaceCaptureTypeEnum.NONE)
        FUAIKit.getInstance().setFaceProcessorEnable(false) // disable face tracking
        avatar?.processorConfig?.setFaceProcessorHeadRotationZRange(-180f, 180f) // reset the head rotation range
        avatar?.processorConfig?.setFaceProcessorType(FUAIFaceProcessorTypeEnum.NONE) // do not use face detection
    }
}

/**
 * Close AR
 */
private fun closeAR() {
    enableFaceTrack(false) // disable face tracking
    scene?.rendererConfig?.setEnableFakeShadow(false) // disable the Scene FakeShadow switch
    scene?.rendererConfig?.setEnableRenderInputData(false) // disable rendering of the input data
    scene?.rendererConfig?.setPostProcessMirrorParam(FUPostProcessMirrorParamData(
        0.7f,
        30f
    )) // re-enable the Scene Avatar mirror post-process (ring reflection)
    scene?.processorConfig?.setEnableARModel(false) // disable AR mode
    // Re-enable the animation feature
    avatar?.animationGraph?.setAnimationGraphParam("AllBoneMaskActive", false)
}
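Like the other render-state calls in this section, openAR and closeAR should run on the GL thread. A minimal sketch, assuming a hypothetical queueOnGLThread hook provided by your render loop:

// Sketch: dispatch the AR switch onto the GL thread (queueOnGLThread is hypothetical).
fun setARMode(enabled: Boolean) {
    queueOnGLThread {
        if (enabled) openAR() else closeAR()
    }
}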
/**
 * Initialize the AI kit; call this in onSurfaceCreate
 */
fun initAIKit() {
    FUAIKit.getInstance().loadAIProcessor(DemoConstants.faceProcessBundlePath, FUAITypeEnum.FACE) // face tracking, face detection, face following
    FUAIKit.getInstance().setFaceProcessorCaptureType(FUAIFaceCaptureTypeEnum.CAPTURE_WITH_TONGUE) // face capture type
    FUAIKit.getInstance().loadAIProcessor(DemoConstants.humanProcessBundlePath, FUAITypeEnum.HUMAN) // body driving
}

/**
 * Enable body driving
 */
fun openHumanProcessor() {
    enableFaceTrack(true) // enable face tracking and face following
    scene?.processorConfig?.setEnableARModel(false) // disable AR mode
    scene?.rendererConfig?.setEnableFakeShadow(false) // disable the Scene FakeShadow switch
    scene?.rendererConfig?.setEnableRenderInputData(false) // disable rendering of the input data
    scene?.rendererConfig?.setPostProcessMirrorParam(
        FUPostProcessMirrorParamData(
            0.7f,
            30f
        )
    )
    FUAIKit.getInstance().setHumanProcessor3DSceneState(mFUAIHuman3DSceneStateEnum) // 3D mode of the body-driving feature
    avatar?.processorConfig?.setHumanProcessorType(FUAIHumanProcessorTypeEnum.INNER) // data source type for body animation driving
    avatar?.processorConfig?.setHumanProcessorFootModeType(mFUAIHumanFootModeTypeEnum) // follow type for body animation driving
    avatar?.animationGraph?.setAnimationGraphParam("AllBoneMaskActive", false) // enable animation
    FUAIKit.getInstance().setHumanProcessorEnable(true) // enable body detection
}

// Half-body driving
mFUAIKit.setHumanProcessor3DSceneState(FUAIHuman3DSceneStateEnum.HALF)
// Full-body driving
mFUAIKit.setHumanProcessor3DSceneState(FUAIHuman3DSceneStateEnum.FULL)
// Disable body driving
mFUAIKit.setHumanProcessor3DSceneState(FUAIHuman3DSceneStateEnum.NONE)
// Follow mode
avatar?.processorConfig?.setHumanProcessorFootModeType(FUAIHumanFootModeTypeEnum.STAGE)
// Fixed mode
avatar?.processorConfig?.setHumanProcessorFootModeType(FUAIHumanFootModeTypeEnum.FIXED)
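The scene-state and foot-mode switches above can be combined into a single entry point; a sketch using only calls already shown in this section (setBodyDriveMode is a hypothetical name):

// Sketch: one helper for switching body-driving modes.
fun setBodyDriveMode(sceneState: FUAIHuman3DSceneStateEnum, footMode: FUAIHumanFootModeTypeEnum) {
    FUAIKit.getInstance().setHumanProcessor3DSceneState(sceneState) // HALF / FULL / NONE
    avatar?.processorConfig?.setHumanProcessorFootModeType(footMode) // STAGE / FIXED
}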
/**
 * Disable body driving
 */
fun closeHumanProcessor() {
    FUAIKit.getInstance().setHumanProcessor3DSceneState(FUAIHuman3DSceneStateEnum.NONE) // turn off the 3D body-driving mode
    avatar?.processorConfig?.setHumanProcessorType(FUAIHumanProcessorTypeEnum.NONE) // disable body animation driving
    FUAIKit.getInstance().setHumanProcessorEnable(false) // disable body detection
}

/**
 * As with releasing the renderKit, release the resources in onGLContextDestroy
 */
FUAIKit.getInstance().releaseAllAIProcessor()
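A sketch of where that release call sits in the GL lifecycle; releasing the renderKit alongside it is an assumption based on the note above:

// Sketch: release the AI processors together with the other GL-bound resources.
override fun onGLContextDestroy() {
    FUAIKit.getInstance().releaseAllAIProcessor()
    // ... release the renderKit and any other GL resources here as well
}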
implementation "com.faceunity.gpb:sta:1.1.4"//sta语音驱动组件 FUStaManager.registerStaInternalCheck(FUDevHelper.getInstance().getAuthPack())//sta鉴权
/**
 * @param data_decoder corresponds to data_decoder.bin in the demo's sta directory
 * @param data_bs corresponds to defaultBSConfig.bin in the demo's sta directory
 * @param langType language type, e.g. Chinese: FULangTypeEnum.CHINESE
 */
val phoneDecoder = FUFileUtils.readByteArrayByPath(initSpeechConfig.data_decoder)
val phoneConfig = FUFileUtils.readByteArrayByPath(initSpeechConfig.data_bs)
if (phoneDecoder != null && phoneConfig != null) {
StaProcessingModuleSync.getInstance().initStaEngine(
ByteArray(0),
config.langType,
phoneConfig,
phoneDecoder
)
}

/**
 * Call on the GL thread to toggle mouth-shape animation control
 * @param enableDefault
 * true: disable mouth-shape control and let the avatar's own animation drive the mouth;
 * false: enable mouth-shape control and drive the mouth via the expression coefficients you set.
 */
private fun setCurrentAvatarBlendShapeWeight(enableDefault: Boolean) {
    avatar?.animationGraph?.setAnimationGraphParam("MaskActive", !enableDefault, false)
}

1. Obtain and cache the expression coefficients according to the audio timestamps; see the STARenderControlImpl implementation in the demo for details
StaProcessingModuleSync.getInstance().appendPhonemesFromAudio(
phonemeTimestamp.toByteArray(),
streamType, config.langType, typeEnum
)
2. Query the expression coefficients from the cache according to the audio playback progress and apply them to the avatar
/**
 * Call on the GL thread to apply the current expression coefficients to the avatar
 */
private fun setCurrentAvatarExpression() {
val positionIndex = mediaPlay.getCurrentPosition()
if (config.isParseTextProcess) {
val currentTime = positionIndex / 1000f
timestampParser.use(currentTime).run {
if (this.isNotEmpty()) {
this.forEach {
callbackMap[currentControlId]?.onPhonemeAction(it.content)
}
}
}
}
val expression = StaProcessingModuleSync.getInstance().getExpressionByAudioPosition(positionIndex)
expression?.let {
val avatar = avatar ?: return
avatar.blendShape.setBlendShape("AIBS", "head", expression, false)
}
}

setCurrentAvatarBlendShapeWeight(true) // hand mouth control back to the avatar's own animation
StaProcessingModuleSync.getInstance().releaseSTAEngine()
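For context, a hedged sketch of how the pieces above might sit in the render loop; onDrawFrame and mediaPlay.isPlaying are assumptions about your GL callback and player API:

// Sketch: drive the mouth once per GL frame while the audio is playing.
override fun onDrawFrame(gl: GL10?) {
    if (mediaPlay.isPlaying) {
        setCurrentAvatarExpression() // look up cached coefficients by playback position
    }
    // ... render the scene as usual
}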