Swift notes: creating image filters with the CoreImage framework

import UIKit

// Import the CoreImage framework, which provides a large set of efficient
// image-processing features for analyzing, manipulating, and applying
// effects to pixel-based images.
import CoreImage

class ViewController: UIViewController {

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.

        // Load the image and display it in an image view that fills the screen.
        let image = UIImage(named: "img1")
        let imageView = UIImageView(image: image)
        imageView.frame = CGRect(x: 0, y: 0,
                                 width: UIScreen.main.bounds.width,
                                 height: UIScreen.main.bounds.height)
        self.view.addSubview(imageView)

        // Create a CIImage from the UIImage loaded above.
        let ciImage = CIImage(image: image!)

        // Create a color object (each component is between 0 and 1; alpha is the opacity).
        let color = CIColor(red: 0.8, green: 0.6, blue: 0.4, alpha: 0.5)

        // Create a filter object, using the monochrome filter.
        let filter = CIFilter(name: "CIColorMonochrome")

        // Set the monochrome color.
        filter?.setValue(color, forKey: kCIInputColorKey)

        // Set the intensity of the monochrome effect.
        filter?.setValue(1.0, forKey: kCIInputIntensityKey)

        // Set the input image for the monochrome filter.
        filter?.setValue(ciImage, forKey: kCIInputImageKey)

        // Get the image produced by applying the monochrome filter.
        let outImage = filter?.outputImage

        // Replace the image view's content with the filtered image.
        imageView.image = UIImage(ciImage: outImage!)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
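A UIImage that merely wraps a CIImage is evaluated lazily, and in practice it does not always display or export reliably (for example when converted to PNG data). A common alternative is to render the filter output through a CIContext into a CGImage first. The sketch below is not part of the original post; it reuses the same "img1" asset and CIColorMonochrome setup shown above, wrapped in an assumed helper function renderMonochrome for illustration.

import UIKit
import CoreImage

// Render the monochrome filter output into a bitmap-backed UIImage (illustrative sketch).
func renderMonochrome(_ input: UIImage) -> UIImage? {
    guard let ciImage = CIImage(image: input),
          let filter = CIFilter(name: "CIColorMonochrome") else { return nil }
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    filter.setValue(CIColor(red: 0.8, green: 0.6, blue: 0.4, alpha: 0.5), forKey: kCIInputColorKey)
    filter.setValue(1.0, forKey: kCIInputIntensityKey)
    guard let output = filter.outputImage else { return nil }

    // Draw the CIImage into a CGImage via a CIContext; the result is a
    // fully rendered bitmap rather than a lazily evaluated recipe.
    let context = CIContext()
    guard let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cgImage, scale: input.scale, orientation: input.imageOrientation)
}

Filter names and their input keys can also be discovered at runtime instead of memorized. The short snippet below (again only an illustrative sketch) prints the built-in color-effect filters and the attributes of one of them; any of the printed names can be passed to CIFilter(name:) in place of "CIColorMonochrome".

import CoreImage

// List the built-in filters in the color-effect category.
let filterNames = CIFilter.filterNames(inCategory: kCICategoryColorEffect)
print(filterNames)

// Inspect the input parameters a specific filter expects.
if let sepia = CIFilter(name: "CISepiaTone") {
    print(sepia.attributes)
}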


Reposted from blog.csdn.net/weixin_41735943/article/details/81163981