Ios 使用AVPlayer对象初始化SKVideoNode-SWIFT
我正在玩ARKit，创建了一个将视频内容叠加到可识别图像上的应用程序。代码如下：
import UIKit
import SceneKit
import ARKit
import SpriteKit
/// View controller that tracks reference images with ARKit and overlays
/// a video plane on each recognized image.
class ViewController: UIViewController, ARSCNViewDelegate {

    /// Renders the AR camera feed and the overlay content.
    @IBOutlet var sceneView: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = self
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)

        let configuration = ARImageTrackingConfiguration()
        if let imagesToTrack = ARReferenceImage.referenceImages(inGroupNamed: "ALLimages", bundle: Bundle.main) {
            configuration.trackingImages = imagesToTrack
            configuration.maximumNumberOfTrackedImages = 3
            print("Images successfully added")
        } else {
            // Surface the failure instead of silently running a session
            // that tracks nothing.
            print("Failed to load reference images from group 'ALLimages'")
        }
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }

    // MARK: - ARSCNViewDelegate

    /// Builds the node shown for a newly detected anchor. Only image anchors
    /// receive video content; any other anchor gets an empty node.
    func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        let node = SCNNode()
        guard let imageAnchor = anchor as? ARImageAnchor else { return node }

        let videoNode = SKVideoNode(fileNamed: "video1.mp4")
        videoNode.play()

        let videoScene = SKScene(size: CGSize(width: 640, height: 360))
        videoNode.position = CGPoint(x: videoScene.size.width / 2, y: videoScene.size.height / 2)
        // SpriteKit's y-axis is flipped relative to the material's texture
        // coordinates, so mirror the node vertically.
        videoNode.yScale = -1.0
        videoScene.addChild(videoNode)

        // Size the plane to the physical dimensions of the detected image.
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width,
                             height: imageAnchor.referenceImage.physicalSize.height)
        plane.firstMaterial?.diffuse.contents = videoScene

        let planeNode = SCNNode(geometry: plane)
        // SCNPlane is vertical by default; rotate it to lie flat on the image.
        planeNode.eulerAngles.x = -.pi / 2
        node.addChildNode(planeNode)
        return node
    }
}
/// Video node backed by an AVPlayer so playback can be observed and
/// controlled (e.g. looped); `SKVideoNode(fileNamed:)` exposes no
/// end-of-playback hook. `nil` when the bundled resource is missing.
var videoNode: SKVideoNode? = {
    // Resolve the bundled video URL directly instead of going through
    // path(forResource:) + URL(fileURLWithPath:).
    guard let url = Bundle.main.url(forResource: "video1", withExtension: "mov") else {
        return nil
    }
    let player = AVPlayer(playerItem: AVPlayerItem(url: url))
    return SKVideoNode(avPlayer: player)
}()
这个很好用……但问题是，一旦视频播放完毕，由于SKVideoNode提供的控件有限，我无法想出如何自动重新播放视频。理想情况下，视频应该循环播放。
我做了一些研究,似乎最好的方法是使用AVPlayer对象初始化我的视频节点
因此,我试图这样做,但无法让它工作
我在类中添加了var player=AVPlayer(),然后尝试如下初始化我的videoNode:
import ARKit
import AVFoundation
import SceneKit
import SpriteKit
import UIKit
/// View controller that tracks reference images with ARKit and overlays a
/// looping video plane on each recognized image. Playback is driven by an
/// AVPlayer (rather than SKVideoNode(fileNamed:)) so that end-of-playback
/// can be observed and the video restarted.
class ViewController: UIViewController, ARSCNViewDelegate {

    /// Renders the AR camera feed and the overlay content.
    @IBOutlet var sceneView: ARSCNView!

    /// Player for the most recently attached overlay video. Kept alive so the
    /// loop observer's item is not deallocated while the node is on screen.
    private var player: AVPlayer?

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = self
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)

        let configuration = ARImageTrackingConfiguration()
        if let imagesToTrack = ARReferenceImage.referenceImages(inGroupNamed: "ALLimages", bundle: Bundle.main) {
            configuration.trackingImages = imagesToTrack
            configuration.maximumNumberOfTrackedImages = 3
            print("Images successfully added")
        } else {
            // Surface the failure instead of silently tracking nothing.
            print("Failed to load reference images from group 'ALLimages'")
        }
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }

    // MARK: - ARSCNViewDelegate

    /// Builds the node shown for a newly detected anchor. For an image anchor,
    /// attaches a plane whose material renders an AVPlayer directly — no
    /// intermediate SKScene/SKVideoNode is needed — and loops the video.
    func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        let node = SCNNode()
        guard let imageAnchor = anchor as? ARImageAnchor,
              let videoURL = Bundle.main.url(forResource: "video1", withExtension: "mp4") else {
            return node
        }

        let player = AVPlayer(playerItem: AVPlayerItem(url: videoURL))
        self.player = player

        // Loop: when this player's item finishes, rewind and resume.
        // Scoping the observer to `player.currentItem` ensures each player
        // restarts only its own video.
        NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime,
                                               object: player.currentItem,
                                               queue: nil) { _ in
            player.seek(to: .zero)
            player.play()
        }

        // Size the plane to the physical dimensions of the detected image.
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width,
                             height: imageAnchor.referenceImage.physicalSize.height)
        // An AVPlayer can be assigned directly as material contents.
        plane.firstMaterial?.diffuse.contents = player

        let planeNode = SCNNode(geometry: plane)
        // SCNPlane is vertical by default; rotate it to lie flat on the image.
        planeNode.eulerAngles.x = -.pi / 2
        node.addChildNode(planeNode)

        player.play()
        return node
    }
}
/// Video node built around an AVPlayer so playback can later be observed
/// and controlled; `nil` when "video1.mov" is absent from the main bundle.
var videoNode: SKVideoNode? = {
    let resourcePath = Bundle.main.path(forResource: "video1", ofType: "mov")
    guard let filePath = resourcePath else {
        return nil
    }
    let playerItem = AVPlayerItem(url: URL(fileURLWithPath: filePath))
    return SKVideoNode(avPlayer: AVPlayer(playerItem: playerItem))
}()
然后我尝试调用player.play()，但视频从未播放。相反，我的平面只是在图像上显示为一个空白矩形。
一旦我成功地初始化了它,我想我可以添加一个观察者来检查视频何时播放完毕并重新启动,但我很难做到这一点。首先,你不需要在SCNNode中的SKScene中使用SKVideoNode。您可以直接使用AVPlayer作为SCNNode的漫反射内容:
plane.firstMaterial?.diffuse.contents = player
对于循环,您必须订阅播放机上的通知,并在结束时将时间重置为零:
// Loop the video: when the player's current item finishes, rewind to the
// start and resume playback. Note: `player` here is a captured reference
// defined elsewhere (outside this snippet).
NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime, object: player.currentItem, queue: nil) { _ in
player.seek(to: .zero)
player.play()
}
太好了,谢谢。我遇到的一个问题是,每次识别图像时,我都使用AVPlayer对象。当我添加观察者时,它只在识别的第一张图像上运行player.seek(到:.zero)。这很奇怪,因为我在这个闭包中添加了一个print语句:print(“Video finished”),这确实会在三个视频中的任何一个视频完成时打印,但它不会在多个视频上运行seek归零…只是再添加一点,我在print(“Video finished”)之后添加了另一个print语句,如下所示:print(“Video TITLE:(self.player.currentItem?.asset)”)实际上,它总是打印同一个项目,因此self.player在这里似乎总是指第一个播放的视频。但我很困惑,为什么在同一个结尾中,当其他视频播放完毕时,它会成功打印“video finished”(视频已完成),所以我认为我显然需要有3个单独的AVPlayer()为了单独控制这些-但现在我似乎处于这样一个位置,即所有三个视频重置为零,并在第一个视频完成后再次播放…似乎很奇怪,因为我有三个场景,现在包装在与识别图像名称相关的三个单独的if语句中。