本文共 6420 字,大约阅读时间需要 21 分钟。
import numpy as np
import cv2
import os
from scipy import ndimage
from skimage.io import imread, imsave
from osgeo import gdal


def sift_kp(image):
    """Detect SIFT keypoints and descriptors on a BGR image.

    Args:
        image: BGR image (H, W, 3) as read by cv2.imread.

    Returns:
        (kp_image, kp, des): keypoint visualization, list of cv2.KeyPoint,
        and the descriptor array (or None if no keypoints were found).
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # OpenCV 3.x contrib API; SIFT was patented at the time this was written,
    # so the author pins an old opencv-contrib build.
    sift = cv2.xfeatures2d_SIFT.create()
    kp, des = sift.detectAndCompute(gray_image, None)
    kp_image = cv2.drawKeypoints(gray_image, kp, None)
    return kp_image, kp, des


def get_good_match(des1, des2):
    """Brute-force kNN matching with Lowe's ratio test (0.75).

    Args:
        des1, des2: descriptor arrays from detectAndCompute.

    Returns:
        List of cv2.DMatch objects that pass the ratio test.
    """
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        # Lowe's ratio test: keep a match only when it is clearly better
        # than the second-best candidate.
        if m.distance < 0.75 * n.distance:
            good.append(m)
    return good


def surf_kp(image):
    """SURF keypoint detection (faster than SIFT, but much less accurate).

    The image is shrunk to 20% before detection to speed things up further.

    Returns:
        (kp_image, kp, des) as in sift_kp, computed on the shrunk image.
    """
    height, width = image.shape[:2]
    size = (int(width * 0.2), int(height * 0.2))
    shrink = cv2.resize(image, size, interpolation=cv2.INTER_AREA)
    gray_image = cv2.cvtColor(shrink, cv2.COLOR_BGR2GRAY)
    surf = cv2.xfeatures2d_SURF.create()
    kp, des = surf.detectAndCompute(gray_image, None)
    kp_image = cv2.drawKeypoints(gray_image, kp, None)
    return kp_image, kp, des


def siftImageAlignment(img1, img2):
    """Register img2 onto img1 via SIFT matches + RANSAC homography.

    Args:
        img1: reference BGR image.
        img2: BGR image to be warped into img1's frame.

    Returns:
        (imgOut, H, status): the warped img2, the 3x3 homography mapping
        img2 -> img1, and the RANSAC inlier mask.

    Raises:
        ValueError: when fewer than 5 good matches are found.  (The original
        code fell through and implicitly returned None, which crashed the
        callers with a TypeError at the tuple-unpack site.)
    """
    _, kp1, des1 = sift_kp(img1)
    _, kp2, des2 = sift_kp(img2)
    goodMatch = get_good_match(des1, des2)
    if len(goodMatch) <= 4:
        # findHomography needs at least 4 correspondences; with <=4 good
        # matches the estimate is meaningless, so fail loudly instead of
        # silently returning None.
        raise ValueError(
            'siftImageAlignment: only {} good matches, need > 4'.format(len(goodMatch)))
    ptsA = np.float32([kp1[m.queryIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
    ptsB = np.float32([kp2[m.trainIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
    ransacReprojThreshold = 4
    H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
    imgOut = cv2.warpPerspective(
        img2, H, (img1.shape[1], img1.shape[0]),
        flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    return imgOut, H, status


# ---- Step 1: estimate one homography per band from the ship/water scene ----
reg_path = '/home/laoer/五一前解决/船只与水/122'
path_list = os.listdir(reg_path)
path_list.sort()
img = [0] * len(path_list)
H = [0] * (len(path_list) - 1)
for i, path in enumerate(path_list):
    print(i)
    img[i] = cv2.imread(os.path.join(reg_path, path))
for i in range(5):
    # Extract all transform matrices: register bands 2..6 onto band 1.
    _, H[i], _ = siftImageAlignment(img[0], img[i + 1])

# ---- Step 2: warp every oil-spill frame with its band's homography ----
oil_path = '/home/laoer/五一前解决/溢油筛选/004'
path_list = os.listdir(oil_path)
path_list.sort()
num = int(len(path_list) / 6)  # frames per band; directory holds 6 bands
mul_img = [0] * num
img = [0] * len(path_list)
for i, path in enumerate(path_list):
    img[i] = cv2.cvtColor(cv2.imread(os.path.join(oil_path, path)), cv2.COLOR_BGR2GRAY)
    s = int(i / num)  # band index of this frame
    if s != 0:
        # Band 0 is the reference frame and needs no warping.
        img[i] = cv2.warpPerspective(
            img[i], H[s - 1], (img[i].shape[1], img[i].shape[0]),
            flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

# ---- Step 3: write 6-band GeoTIFFs (cv2/skimage cap out at 4 bands) ----
# NOTE(review): 2448x2048 is the sensor frame size used throughout — confirm
# against the input imagery before reusing with another camera.
driver = gdal.GetDriverByName("GTiff")
for i in range(num):
    dataset = driver.Create('/home/laoer/五一前解决/新/' + 'complete{}.tif'.format(i),
                            2448, 2048, 6, gdal.GDT_UInt16)
    for j in range(6):
        dataset.GetRasterBand(j + 1).WriteArray(img[i + j * num])
    del dataset  # closing the dataset flushes the file to disk
然后是检测(阈值分割)部分的代码与测试结果:
import numpy as np
import cv2
import os
from scipy import ndimage
from skimage.io import imread, imsave
from osgeo import gdal

pp = '/home/laoer/五一前解决/检测结果'


def open_f(image):
    """Clean up a binary mask with a morphological closing.

    Args:
        image: single-channel binary image (0/255).

    Returns:
        The mask after closing with a 10x10 rectangular kernel, which fills
        small holes and joins nearby oil patches.
    """
    binary = image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    return binary


with open(os.path.join(pp, "result.txt"), "w") as f:
    f.write("图像面积\n")
    for i in range(11):
        dataset = gdal.Open('/home/laoer/五一前解决/新/' + 'complete{}.tif'.format(i))
        num = dataset.RasterCount
        print(num)
        band = [0] * num
        path = os.path.join(pp, 'result{}.png'.format(i))
        for j in range(num):
            # GetRasterBand alone is not enough: ReadAsArray materializes
            # the band as a uint16 numpy array.
            band[j] = dataset.GetRasterBand(j + 1).ReadAsArray(0, 0, 2448, 2048)
        # Per-band thresholds determined empirically by the author; each
        # band becomes a boolean oil/no-oil mask.
        band[0] = (band[0] > 180)
        band[1] = (band[1] > 30)
        band[2] = (band[2] > 80)
        band[3] = (band[3] > 170)
        band[4] = (band[4] > 175)
        band[5] = (band[5] > 24)
        # AND the band masks together.
        # NOTE(review): range(num - 1) means band[5] is thresholded above
        # but never combined here — looks like an off-by-one (range(num)
        # would include it); kept as-is to preserve the published results.
        for m in range(num - 1):
            if m == 0:
                result = band[m]
            result = (result & band[m])
        result = np.uint8(result) * 255  # bool mask -> 0/255 image
        result = open_f(result)
        area = np.sum(result == 255)  # oil area in pixels
        f.write("第{}张图片溢油面积:{}个像元\n".format(i + 1, area))
        cv2.imwrite(path, result)
        cv2.namedWindow('result', cv2.WINDOW_NORMAL)
        cv2.imshow('result', result)
        cv2.waitKey()
在这里记录一下一些小细节。
首先是图像配准,由于所得到的数据就是一些简单的png图像,所以定位信息啥的是不要想的,也别想用envi这些软件操作了,因此首先要进行图像配准,在这里采用sift算子作为配准方法,不过由于sift被申请专利了,所以只能用老版本的opencv,我用的是3点几版本的,相关信息可以搜到不做赘述 准备好cv之后,就是提取特征点,然后这里不是有船只和水面的图像嘛,就用它来提取变换矩阵,然后就得到了不同的波段配准到第一波段的矩阵,这里想配准到哪个波段都是可以的,只不过需要相应改动代码而已。 在完成配准之后呢,就需要输出图像了,由于opencv,skimage等都只能输出1、3、4波段的图像,所以只能用gdal输出图像,具体操作细节可以搜索,或者从代码中看得出。配准之后就需要进行数据分析了,那么在第二段代码中,首先需要把图像分波段按照矩阵读取出来,仅仅GetRasterBand是不够的,还需要ReadAsArray,这样就得到了uint16的数组,然后经过我的分析,这些数组比较合适的阈值分别如代码中所示。至于每个波段分别是什么波长范围,就不用透露了,毕竟每个人用的光谱仪不一样,都需要各自分析的。
其实这里我也曾经想过用神经网络,肯定非常好训练,只需要圈定几个roi,再将其作为向量输出,直接经过几个全连接层就完事,但是我实际上并不知道比较可靠的油污数据是什么样的,况且这么点数据实在是没有说服力,因此不做这样处理。 配准之后呢就可以进行波段分析了,由于给的各个图像都是中心区域含有大量油污的,所以可以放心地直接观察矩阵进行分析。转载地址:http://cmhrn.baihongyu.com/