http://codingdict.com/sources/py/PIL.Image/3732.html
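Every snippet on this page calls PIL's Image.frombytes(mode, size, data, decoder_name='raw', *args), which builds an image directly from a byte buffer whose length must match the requested mode and size. A minimal illustrative sketch, not taken from any of the projects below:

from PIL import Image

# A 2x2 solid-red RGB image built from a raw buffer:
# mode 'RGB' needs exactly width * height * 3 bytes.
width, height = 2, 2
data = bytes([255, 0, 0] * (width * height))
img = Image.frombytes('RGB', (width, height), data)
img.save('red_2x2.png')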
Project: demosys-py | Author: Contraz
def create(format='png'):
    """
    Create a screenshot
    :param format: formats supported by PIL (png, jpeg etc)
    """
    dest = ""
    if not settings.SCREENSHOT_PATH:
        print("SCREENSHOT_PATH not defined in settings. Using cwd as fallback.")

    if settings.SCREENSHOT_PATH:
        if os.path.exists(settings.SCREENSHOT_PATH):
            dest = settings.SCREENSHOT_PATH
        else:
            print("SCREENSHOT_PATH {} does not exist. Using cwd as fallback".format(settings.SCREENSHOT_PATH))

    x, y, width, height = GL.glGetIntegerv(GL.GL_VIEWPORT)
    print("Screenshot viewport:", x, y, width, height)

    GL.glPixelStorei(GL.GL_PACK_ALIGNMENT, 1)
    data = GL.glReadPixels(x, y, width, height, GL.GL_RGB, GL.GL_UNSIGNED_BYTE)

    image = Image.frombytes("RGB", (width, height), data)
    image = image.transpose(Image.FLIP_TOP_BOTTOM)

    name = "{}.{}".format(datetime.now().strftime("%Y-%m-%d-%H-%M-%S"), format)
    image.save(os.path.join(dest, name), format=format)
Project: Maps | Author: DarkPurple141
def v_fx(screen):
    dims = screen.get_size()
    im1 = pygame.image.tostring(screen, 'RGB')
    im = Image.frombytes('RGB', (dims), im1)
    im1 = im.filter(ImageFilter.BLUR)
    im1.save('test.png', 'PNG')
    return pygame.image.load('test.png')
Project: imagepaste | Author: robinchenyu
def load(self, im):
    im.fp.seek(0)  # rewind
    return Image.frombytes(
        "RGB", im.size,
        Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
        "raw", "BGR", (im.size[0]*3 + 3) & -4, -1
    )
Project: imagepaste | Author: robinchenyu
def open(filename):
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
    else:
        fp = builtins.open(filename, "rb")

    # read header fields
    header = fp.read(32+24+32+12)
    size = i32(header, 32), i32(header, 36)
    offset = i32(header, 40)

    # load pixel data
    fp.seek(offset)

    im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split(b"\0", 1)[0]
    next_name = header[56:56+32].split(b"\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
Project: imagepaste | Author: robinchenyu
def grab(bbox=None):
    if sys.platform == "darwin":
        f, file = tempfile.mkstemp('.png')
        os.close(f)
        subprocess.call(['screencapture', '-x', file])
        im = Image.open(file)
        im.load()
        os.unlink(file)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
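The extra arguments passed to the 'raw' decoder in load() and grab() above follow the pattern Image.frombytes(mode, size, data, 'raw', rawmode, stride, orientation): the stride (size[0]*3 + 3) & -4 rounds each BGR row up to a 4-byte boundary (the Windows DIB convention), and orientation -1 reads the rows bottom-up. A small self-contained sketch of the same call, using a synthetic buffer instead of screen data:

from PIL import Image

# 5x2 solid-red image delivered as padded, bottom-up BGR rows.
width, height = 5, 2
stride = (width * 3 + 3) & -4        # 15 bytes of pixel data padded to 16
row = bytes([0, 0, 255] * width)     # B, G, R per pixel -> solid red
row += bytes(stride - len(row))      # zero-fill up to the 4-byte boundary
data = row * height

img = Image.frombytes("RGB", (width, height), data, "raw", "BGR", stride, -1)
print(img.getpixel((0, 0)))          # (255, 0, 0)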
Project: Instagram-API | Author: danleyb2
def createIconGD(file, size=100, raw=True):
    """
    Implements the actual logic behind creating the icon/thumbnail
    :type file: str
    :param file: path to the file name
    :rtype: image
    :return: icon/thumbnail for the video
    """
    image = Image.open(file)
    width, height = image.size

    if width > height:
        y = 0
        x = (width - height) / 2
        smallestSide = height
    else:
        x = 0
        y = (height - width) / 2
        smallestSide = width

    # image_p = Image.new('RGB',(size, size))
    # image = Image.frombytes('RGBa',(size,size),file_get_contents(file))

    image.thumbnail((size, size))

    ##todo convert to jpeg
    i = image.tobytes()
    image.close()
    # image_p.close()
    return i
Project: StratoBalloon | Author: delattreb
def display(self, image):
    """
    Takes an image, scales it according to the nominated transform, and
    stores it for later building into an animated GIF.
    """
    assert (image.size[0] == self.width)
    assert (image.size[1] == self.height)

    surface = self.to_surface(image)
    rawbytes = self._pygame.image.tostring(surface, "RGB", False)
    im = Image.frombytes("RGB", (self.width * self.scale, self.height * self.scale), rawbytes)
    self._images.append(im)

    self._count += 1
    logger.debug("Recording frame: {0}".format(self._count))

    if self._max_frames and self._count >= self._max_frames:
        sys.exit(0)
Project: Material-Design-Avatar | Author: today4king
def avatar_gen_img(self):
    font_size = int(self.size / 10 * 8)
    pic_size = self.size
    an, is_letter = self.avatar_name()
    font = self.zh_font_file_name
    if is_letter:
        font = self.en_font_file_name
        font_size = int(self.size / 10 * 11)
    font_file = os.path.abspath(os.path.join(self.font_dir, font))
    pygame.init()
    f = pygame.font.Font(font_file, font_size)
    is_light = self.is_light_color(self.avatar_background_color())
    rtext = f.render(an.upper(), True, (0, 0, 0) if is_light else (255, 255, 255))
    # pygame.image.save(rtext, '%s.png' % an)
    mode = 'RGBA'
    astr = pygame.image.tostring(rtext, 'RGBA')
    circle = Image.new("RGBA", (self.size, self.size))
    word = Image.frombytes(mode, f.size(an), astr)
    word_x = int((pic_size - word.size[0]) / 2)
    word_y = int(word_x * 0.9)
    if is_letter:
        word_y = int((pic_size - word.size[1]) / 2)
    draw = ImageDraw.Draw(circle)
    draw.ellipse((0, 0, self.size, self.size), fill=self.avatar_background_color(), outline=self.avatar_background_color())
    draw.point((100, 100), 'red')
    r, g, b, a = word.split()
    circle.paste(word, (word_x, word_y), a)
    sharpness = ImageEnhance.Sharpness(circle)
    # circle = sharpness.enhance(7.0)
    # im.show()
    # circle.show()
    # print(circle)
    return circle
Project: ascii-art-py | Author: blinglnav
def load(self, im):
    im.fp.seek(0)  # rewind
    return Image.frombytes(
        "RGB", im.size,
        Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
        "raw", "BGR", (im.size[0]*3 + 3) & -4, -1
    )
Project: ascii-art-py | Author: blinglnav
def open(filename):
    """
    Load texture from a Quake2 WAL texture file.

    By default, a Quake2 standard palette is attached to the texture.
    To override the palette, use the putpalette method.

    :param filename: WAL file name, or an opened file handle.
    :returns: An image instance.
    """
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
    else:
        fp = builtins.open(filename, "rb")

    # read header fields
    header = fp.read(32+24+32+12)
    size = i32(header, 32), i32(header, 36)
    offset = i32(header, 40)

    # load pixel data
    fp.seek(offset)

    im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split(b"\0", 1)[0]
    next_name = header[56:56+32].split(b"\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
Project: ascii-art-py | Author: blinglnav
def grab(bbox=None):
    if sys.platform == "darwin":
        fh, filepath = tempfile.mkstemp('.png')
        os.close(fh)
        subprocess.call(['screencapture', '-x', filepath])
        im = Image.open(filepath)
        im.load()
        os.unlink(filepath)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
Project: radar | Author: amoose136
def load(self, im):
    im.fp.seek(0)  # rewind
    return Image.frombytes(
        "RGB", im.size,
        Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
        "raw", "BGR", (im.size[0]*3 + 3) & -4, -1
    )
Project: radar | Author: amoose136
def open(filename):
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
    else:
        fp = builtins.open(filename, "rb")

    # read header fields
    header = fp.read(32+24+32+12)
    size = i32(header, 32), i32(header, 36)
    offset = i32(header, 40)

    # load pixel data
    fp.seek(offset)

    im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split(b"\0", 1)[0]
    next_name = header[56:56+32].split(b"\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
Project: radar | Author: amoose136
def grab(bbox=None):
    if sys.platform == "darwin":
        f, file = tempfile.mkstemp('.png')
        os.close(f)
        subprocess.call(['screencapture', '-x', file])
        im = Image.open(file)
        im.load()
        os.unlink(file)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
Project: piqueserver | Author: piqueserver
def get_overview(self):
    current_time = reactor.seconds()
    if (self.last_overview is None or
            self.last_map_name != self.protocol.map_info.name or
            current_time - self.last_overview > OVERVIEW_UPDATE_INTERVAL):
        overview = self.protocol.map.get_overview(rgba=True)
        image = Image.frombytes('RGBA', (512, 512), overview)
        data = BytesIO()
        image.save(data, 'png')
        self.overview = data.getvalue()
        self.last_overview = current_time
        self.last_map_name = self.protocol.map_info.name
    return self.overview
Project: Cozmo-Explorer-Tool | Author: GrinningHermit
def create_default_image(image_width, image_height, do_gradient=False):
    """Create a place-holder PIL image to use until we have a live feed from Cozmo"""
    image_bytes = bytearray([0x70, 0x70, 0x70]) * image_width * image_height

    if do_gradient:
        i = 0
        for y in range(image_height):
            for x in range(image_width):
                image_bytes[i] = int(255.0 * (x / image_width))      # R
                image_bytes[i+1] = int(255.0 * (y / image_height))   # G
                image_bytes[i+2] = 0                                 # B
                i += 3

    image = Image.frombytes('RGB', (image_width, image_height), bytes(image_bytes))
    return image
Project: Boundary-Trees | Author: jayricco
def boundary_tree_to_image(boundary_tree, size, image_mesh):
    arr = array('B')
    np.apply_along_axis(lambda c: arr.extend(boundary_tree.query(c)), 1, image_mesh)
    return Image.frombytes("RGB", size, arr)
Project: DHP | Author: YuhangSong
def get_view(input_width, input_height, view_fov_x, view_fov_y, view_center_lat, view_center_lon,
             output_width, output_height, cur_frame, file_, is_render=False, temp_dir=""):
    temp_1 = temp_dir + "1.yuv"
    import config
    subprocess.call(["/home/" + config.cluster_home[config.cluster_current] + "/remap",
                     "-i", "rect",
                     "-o", "view",
                     "-m", str(input_height),
                     "-b", str(input_width),
                     "-w", str(output_width),
                     "-h", str(output_height),
                     "-x", str(view_fov_x),
                     "-y", str(view_fov_y),
                     "-p", str(view_center_lat),
                     "-l", str(view_center_lon),
                     "-z", "1",
                     "-s", str(cur_frame),
                     file_,
                     temp_1])
    frame = yuv_import(temp_1, (output_height, output_width), 1, 0)
    subprocess.call(["rm", temp_1])

    if (is_render == True):
        print("this is debugging, not trainning")
        YY = frame[0]
        im = Image.frombytes('L', (output_height, output_width), YY.tostring())
        im.show()
        frame = np.zeros((42, 42, 1))
        frame = np.reshape(frame, [42, 42, 1])
    else:
        frame = np.array(frame)
        frame = frame.astype(np.float32)
        frame *= (1.0 / 255.0)
        frame = np.reshape(frame, [42, 42, 1])

    return frame
Project: Malmo | Author: wtconlin
def waitForInitialState(self):
    '''Before a command has been sent we wait for an observation of the world and a frame.'''
    # wait for a valid observation
    world_state = self.agent_host.peekWorldState()
    while world_state.is_mission_running and all(e.text == '{}' for e in world_state.observations):
        world_state = self.agent_host.peekWorldState()
    # wait for a frame to arrive after that
    num_frames_seen = world_state.number_of_video_frames_since_last_state
    while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
        world_state = self.agent_host.peekWorldState()
    world_state = self.agent_host.getWorldState()

    if world_state.is_mission_running:
        assert len(world_state.video_frames) > 0, 'No video frames!?'

        obs = json.loads(world_state.observations[-1].text)
        self.prev_x = obs[u'XPos']
        self.prev_y = obs[u'YPos']
        self.prev_z = obs[u'ZPos']
        self.prev_yaw = obs[u'Yaw']
        print 'Initial position:', self.prev_x, ',', self.prev_y, ',', self.prev_z, 'yaw', self.prev_yaw

        if save_images:
            # save the frame, for debugging
            frame = world_state.video_frames[-1]
            image = Image.frombytes('RGB', (frame.width, frame.height), str(frame.pixels))
            self.iFrame = 0
            self.rep = self.rep + 1
            image.save('rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(self.iFrame).zfill(4) + '.png')

    return world_state
Project: instax_api | Author: jpwsutton
def decodeImage(self, imageBytes):
    """Decode the byte array into an image."""
    targetImg = []
    # Packing the individual colours back together.
    for h in range(self.printHeight):
        for w in range(self.printWidth):
            redTarget = (((w * self.printHeight) * 3) + (self.printHeight * 0)) + h
            greenTarget = (((w * self.printHeight) * 3) + (self.printHeight * 1)) + h
            blueTarget = (((w * self.printHeight) * 3) + (self.printHeight * 2)) + h
            targetImg.append(imageBytes[redTarget])
            targetImg.append(imageBytes[greenTarget])
            targetImg.append(imageBytes[blueTarget])

    preImage = Image.frombytes('RGB', (self.printWidth, self.printHeight), bytes(targetImg))
    self.myImage = preImage.rotate(90, expand=True)
Project: relaax | Author: deeplearninc
def _unpack_image(cls, buf, offset):
    mode, offset = cls._unpack_string(buf, offset)
    x = unpack_from("I", buf, offset)[0]
    offset += 4
    y = unpack_from("I", buf, offset)[0]
    offset += 4
    reslen = unpack_from("I", buf, offset)[0]
    offset += 4
    img = Image.frombytes(mode, (x, y), bytes(buf[offset:offset+reslen]))  # .convert("RGB")
    res = np.asarray(img)
    if img.mode in ["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV"]:
        res = res.astype(np.float32) * (1.0 / 255.0)
    # print(res.shape)
    # res = np.reshape(res, (x, y, 1))
    offset += reslen
    return res, offset
Project: malmo-challenge | Author: Kaixhin
def frame(self):
    latest_ws = self._agent.peekWorldState()
    if hasattr(latest_ws, 'video_frames') and len(latest_ws.video_frames) > 0:
        self._last_frame = latest_ws.video_frames[-1]

    return Image.frombytes('RGB',
                           (self._last_frame.width, self._last_frame.height),
                           bytes(self._last_frame.pixels))
Project: malmo-challenge | Author: Microsoft
def frame(self):
    latest_ws = self._agent.peekWorldState()
    if hasattr(latest_ws, 'video_frames') and len(latest_ws.video_frames) > 0:
        self._last_frame = latest_ws.video_frames[-1]

    return Image.frombytes('RGB',
                           (self._last_frame.width, self._last_frame.height),
                           bytes(self._last_frame.pixels))
Project: CozmoSelfDriveToyUsingCNN | Author: benjafire
def create_default_image(image_width, image_height, do_gradient=False):
    '''Create a place-holder PIL image to use until we have a live feed from Cozmo'''
    image_bytes = bytearray([0x70, 0x70, 0x70]) * image_width * image_height

    if do_gradient:
        i = 0
        for y in range(image_height):
            for x in range(image_width):
                image_bytes[i] = int(255.0 * (x / image_width))      # R
                image_bytes[i+1] = int(255.0 * (y / image_height))   # G
                image_bytes[i+2] = 0                                 # B
                i += 3

    image = Image.frombytes('RGB', (image_width, image_height), bytes(image_bytes))
    return image
Project: backlight-indicator | Author: atareao
def get_backlight():
    image_data = None
    try:
        video = v4l2capture.Video_device('/dev/video0')
        size_x, size_y = video.set_format(1280, 1024)
        video.create_buffers(1)
        video.start()
        time.sleep(2)
        video.queue_all_buffers()
        select.select((video,), (), ())
        image_data = video.read()
    except Exception as e:
        print('-----', e, '-----')
    finally:
        video.stop()
        video.close()
    if image_data is not None:
        image = Image.frombytes("RGB", (size_x, size_y), image_data)
        value = calculation.calculate_brightness_for_image(image)
        print('==== captured: {0} ===='.format(value))
        return value
    return None
Project: backlight-indicator | Author: atareao
def get_backlight(self):
    subprocess.call(shlex.split(COMMAND))
    pixbuf = GdkPixbuf.Pixbuf.new_from_file('/tmp/fswebcam.jpg')
    image = Image.frombytes('RGB',
                            (pixbuf.get_width(), pixbuf.get_height()),
                            pixbuf.get_pixels())
    pixbuf.savev('/home/lorenzo/Escritorio/test.png', "png", [], [])
    return calculation.calculate_brightness_for_image(image)
Project: cozmo | Author: yp7y-stu
def create_default_image(image_width, image_height, do_gradient=False):
    '''Create a place-holder PIL image to use until we have a live feed from Cozmo'''
    image_bytes = bytearray([0x70, 0x70, 0x70]) * image_width * image_height

    if do_gradient:
        i = 0
        for y in range(image_height):
            for x in range(image_width):
                image_bytes[i] = int(255.0 * (x / image_width))      # R
                image_bytes[i+1] = int(255.0 * (y / image_height))   # G
                image_bytes[i+2] = 0                                 # B
                i += 3

    image = Image.frombytes('RGB', (image_width, image_height), bytes(image_bytes))
    return image
Project: Moderat | Author: Swordf1sh
def recv_screenshot(self, data):
    webcam_dict = data['payload']
    if webcam_dict == 'noWebcamError':
        message.error(self.moderat, self.moderat.MString('MSGBOX_ERROR'),
                      self.moderat.MString('NOWEBCAM_ERROR'))
        return

    try:
        camera_info = ast.literal_eval(webcam_dict)
        im = Image.frombytes('RGB',
                             (int(camera_info['width']), int(camera_info['height'])),
                             zlib.decompress(camera_info['webcambits']),
                             'raw', 'BGR', 0, -1)
        camera_bits = im.convert('RGBA')
        self.cameraLabel.setPixmap(QPixmap.fromImage(ImageQt.ImageQt(camera_bits)).scaled(
            self.cameraLabel.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        self.current_bits = camera_bits
        self.saveButton.setDisabled(False)
        self.clearButton.setDisabled(False)
    except SyntaxError:
        pass
Project: CAAPR | Author: Stargrazer82301
def ensurepil(self, invalidate=True):
    if self.dpil is None:
        if self.dbuf is not None:
            self.dpil = Image.frombytes("RGBA", self.shape, self.dbuf, "raw", "RGBA", 0, 1)
        elif self.darr is not None:
            data = self.scaledpixelarray(0, 255.999)
            buf = np.rollaxis(data, 1).astype(np.uint8).tostring()
            self.dpil = Image.frombytes("RGB", self.shape, buf, "raw", "RGB", 0, -1)
        else:
            raise ValueError("No source data for conversion to PIL image")
    if invalidate:
        self.dbuf = None
        self.darr = None
        self.rangearr = None

## This private function ensures that there is a valid buffer representation, converting from
#  one of the other representations if necessary, and invalidating the other representations if requested.
Project: CAAPR | Author: Stargrazer82301
def ensurepil(self, invalidate=True):
    if self.dpil is None:
        if self.dbuf is not None:
            self.dpil = Image.frombytes("RGBA", self.shape, self.dbuf, "raw", "RGBA", 0, 1)
        elif self.darr is not None:
            data = self.scaledpixelarray(0, 255.999)
            buf = np.rollaxis(data, 1).astype(np.uint8).tostring()
            self.dpil = Image.frombytes("RGB", self.shape, buf, "raw", "RGB", 0, -1)
        else:
            raise ValueError("No source data for conversion to PIL image")
    if invalidate:
        self.dbuf = None
        self.darr = None
        self.rangearr = None

## This private function ensures that there is a valid buffer representation, converting from
#  one of the other representations if necessary, and invalidating the other representations if requested.
Project: BeltaGo | Author: 54BayMax
def imageCallback(self, msg):
    global qImg
    try:
        cv_image = self.bridge.imgmsg_to_cv2(msg, "rgb8")
        cv_image = cv2.resize(cv_image, (640, 480))
        image = PImage.frombytes("RGB", (640, 480), cv_image.tostring())
        Lock.acquire()
        qImg = ImageQt(image)
        Lock.release()
    except CvBridgeError as e:
        print 'a'
        print(e)
Project: cozmo-python-sdk | Author: anki
def create_default_image(image_width, image_height, do_gradient=False):
    '''Create a place-holder PIL image to use until we have a live feed from Cozmo'''
    image_bytes = bytearray([0x70, 0x70, 0x70]) * image_width * image_height

    if do_gradient:
        i = 0
        for y in range(image_height):
            for x in range(image_width):
                image_bytes[i] = int(255.0 * (x / image_width))      # R
                image_bytes[i+1] = int(255.0 * (y / image_height))   # G
                image_bytes[i+2] = 0                                 # B
                i += 3

    image = Image.frombytes('RGB', (image_width, image_height), bytes(image_bytes))
    return image
Project: django-watermark-images | Author: abarto
def lsb_encode(data, image):
    bytes_io = BytesIO()
    dump(data, file=bytes_io)
    data_bytes = bytes_io.getvalue()
    data_bytes_array = np.fromiter(data_bytes, dtype=np.uint8)
    data_bits_list = np.unpackbits(data_bytes_array).tolist()
    data_bits_list += [0] * (image.size[0] * image.size[1] - len(data_bits_list))
    watermark = Image.frombytes(data=bytes(data_bits_list), size=image.size, mode='L')
    red, green, blue = image.split()
    watermarked_red = ImageMath.eval("convert(a&0xFE|b&0x1,'L')", a=red, b=watermark)
    watermarked_image = Image.merge("RGB", (watermarked_red, green, blue))
    return watermarked_image
Project: geonotebook | Author: OpenGeoscience
def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):
    ''' '''
    # NB: To be thread-safe Map object cannot be stored in the class state.
    # see: https://groups.google.com/forum/#!topic/mapnik/USDlVfSk328
    Map = mapnik.Map(width, height, srs)
    Map.zoom_to_box(Box2d(xmin, ymin, xmax, ymax))
    Map = self.style_map(Map)

    img = mapnik.Image(width, height)
    # Don't even call render with scale factor if it's not
    # defined. Plays safe with older versions.
    if self.scale_factor is None:
        mapnik.render(Map, img)
    else:
        mapnik.render(Map, img, self.scale_factor)

    def gamma_correct(im):
        """Fast gamma correction with PIL's image.point() method."""
        if self.gamma != 1.0:
            table = [pow(x / 255., 1.0 / self.gamma) * 255 for x in range(256)]
            # Expand table to number of bands
            table = table * len(im.mode)
            return im.point(table)
        else:
            return im

    # b = BytesIO(img.tostring())
    img = Image.frombytes('RGBA', (width, height), img.tostring())
    img = gamma_correct(img)

    return img
Project: beep | Author: nullstorage
def create_default_image(image_width, image_height, do_gradient=False):
    '''Create a place-holder PIL image to use until we have a live feed from Cozmo'''
    image_bytes = bytearray([0x70, 0x70, 0x70]) * image_width * image_height

    if do_gradient:
        i = 0
        for y in range(image_height):
            for x in range(image_width):
                image_bytes[i] = int(1000.0 * (x / image_width))      # R
                image_bytes[i+1] = int(1000.0 * (y / image_height))   # G
                image_bytes[i+2] = 0                                  # B
                i += 3

    image = Image.frombytes('RGB', (image_width, image_height), bytes(image_bytes))
    return image
Project: go_dino | Author: pauloalves86
def play_game(get_command_callback: Callable[[int, int, int], str]) -> int:
    with mss() as screenshotter:
        get_game_landscape_and_set_focus_or_die(screenshotter)
        reset_game()
        landscape = get_game_landscape_and_set_focus_or_die(screenshotter, .95)

        start_game()
        gameover_template = cv2.imread(os.path.join('templates', 'dino_gameover.png'), 0)
        start = time.time()
        last_distance = landscape['width']
        x1, x2, y1, y2 = compute_region_of_interest(landscape)
        speed = 0
        last_compute_speed = time.time()
        last_speeds = [3] * 30
        last_command_time = time.time()

        while True:
            buffer = screenshotter.grab(landscape)
            image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
            image = np.array(image)
            image += np.abs(247 - image[0, x2])
            roi = image[y1:y2, x1:x2]
            score = int(time.time() - start)
            distance, size = compute_distance_and_size(roi, x2)
            speed = compute_speed(distance, last_distance, speed, last_speeds, last_compute_speed)
            last_compute_speed = time.time()
            # Check GAME OVER
            if distance == last_distance or distance == 0:
                res = cv2.matchTemplate(image, gameover_template, cv2.TM_CCOEFF_NORMED)
                if np.where(res >= 0.7)[0]:
                    reset_game()
                    return score
            last_distance = distance
            if time.time() - last_command_time < 0.6:
                continue
            command = get_command_callback(distance, size, speed)
            if command:
                last_command_time = time.time()
                pyautogui.press(command)
Project: go_dino | Author: pauloalves86
def find_game_position(screenshotter, threshold) -> Dict:
    dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
    w, h = dino_template.shape[::-1]
    landscape_template = cv2.imread(os.path.join('templates', 'dino_landscape.png'), 0)
    lw, lh = landscape_template.shape[::-1]
    monitor = screenshotter.monitors[0]
    buffer = screenshotter.grab(monitor)
    image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
    image = np.array(image)
    res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= threshold)
    if len(loc[0]):
        pt = next(zip(*loc[::-1]))
        return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
    return {}
Project: WXBotForPi | Author: nemoTyrant
def load(self, im):
    im.fp.seek(0)  # rewind
    return Image.frombytes(
        "RGB", im.size,
        Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
        "raw", "BGR", (im.size[0]*3 + 3) & -4, -1
    )
Project: WXBotForPi | Author: nemoTyrant
def open(filename):
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
    else:
        fp = builtins.open(filename, "rb")

    # read header fields
    header = fp.read(32+24+32+12)
    size = i32(header, 32), i32(header, 36)
    offset = i32(header, 40)

    # load pixel data
    fp.seek(offset)

    im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split(b"\0", 1)[0]
    next_name = header[56:56+32].split(b"\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
Project: WXBotForPi | Author: nemoTyrant
def grab(bbox=None):
    if sys.platform == "darwin":
        fh, filepath = tempfile.mkstemp('.png')
        os.close(fh)
        subprocess.call(['screencapture', '-x', filepath])
        im = Image.open(filepath)
        im.load()
        os.unlink(filepath)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
Project: vizgen | Author: uva-graphics
def write_img(out_img, out_filename, do_clip=True):
    """Writes out_img to out_filename"""
    if use_4channel and len(out_img.shape) == 3 and out_img.shape[2] == 4:
        out_img = out_img[:, :, :3]
    assert out_img is not None, 'expected out_img to not be None'
    out_img = numpy.clip(out_img, 0, 1) if do_clip else out_img
    if is_pypy:
        out_img = numpy.asarray(out_img * 255, 'uint8')
        if len(out_img.shape) == 2:
            mode = 'L'
        elif len(out_img.shape) == 3:
            if out_img.shape[2] == 3:
                mode = 'RGB'
            elif out_img.shape[2] == 4:
                mode = 'RGBA'
            else:
                raise ValueError('unknown color image mode')
        else:
            raise ValueError('unknown number of dimensions for image')
        I = Image.frombytes(mode, (out_img.shape[1], out_img.shape[0]), out_img.tobytes())
        I.save(out_filename)
    else:
        try:
            skimage.io.imsave(out_filename, out_img)
        except:
            print('Caught exception while saving to {}: image shape is {}, min: {}, max: {}'.format(
                out_filename, out_img.shape, out_img.min(), out_img.max()))
            raise
Project: unicorn-hat-hd | Author: pimoroni
def write(self, buf):
    img = Image.frombytes('RGB', (64, 64), buf)
    img = img.resize((16, 16), Image.BILINEAR)

    for x in range(16):
        for y in range(16):
            r, g, b = img.getpixel((x, y))
            self.hat.set_pixel(x, y, r, g, b)

    self.hat.show()
Project: deresuteme | Author: marcan
def load_image(fd):
    d = Asset(fd)
    tex = [i for i in d.objs if "image data" in i]
    assert len(tex) == 1
    tex = tex[0]
    data = tex["image data"]
    width, height, fmt = tex["m_Width"], tex["m_Height"], tex["m_TextureFormat"]
    if fmt == 7:  # BGR565
        im = Image.frombytes("RGB", (width, height), data, "raw", "BGR;16")
    elif fmt == 13:  # ABGR4444
        im = Image.frombytes("RGBA", (width, height), data, "raw", "RGBA;4B")
        r, g, b, a = im.split()
        im = Image.merge("RGBA", (a, b, g, r))
    else:
        raise Exception("Unsupported format %d" % fmt)
    im = im.transpose(Image.FLIP_TOP_BOTTOM)
    return im
Project: c3nav | Author: c3nav
def run(self):
    while True:
        task = self._queue.get()

        ctx = self._get_ctx(task.width, task.height)

        ctx.ctx.clear(*task.background_rgb)
        ctx.prog.uniforms['mvp'].value = task.mvp

        if task.vertices:
            vbo = ctx.ctx.buffer(task.vertices)
            vao = ctx.ctx.simple_vertex_array(ctx.prog, vbo, ['in_vert', 'in_color'])
            vao.render()

        color_rbo2 = ctx.ctx.renderbuffer((task.width, task.height))
        fbo2 = ctx.ctx.framebuffer(color_rbo2)
        ctx.ctx.copy_framebuffer(fbo2, ctx.fbo)

        img = Image.frombytes('RGB', (task.width, task.height), fbo2.read(components=3))

        f = io.BytesIO()
        img.save(f, 'PNG')
        f.seek(0)
        task.set_result(f.read())
Project: tensorlayer-chinese | Author: shorxp
def data_to_tfrecord(images, labels, filename):
    """ Save data into TFRecord """
    print("Converting data into %s ..." % filename)
    cwd = os.getcwd()
    writer = tf.python_io.TFRecordWriter(filename)
    for index, img in enumerate(images):
        img_raw = img.tobytes()
        ## Visualize a image
        # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
        label = int(labels[index])
        # print(label)
        ## Convert the bytes back to image as follow:
        # image = Image.frombytes('RGB', (32, 32), img_raw)
        # image = np.fromstring(img_raw, np.float32)
        # image = image.reshape([32, 32, 3])
        # tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        }))
        writer.write(example.SerializeToString())  # Serialize To String
    writer.close()
Project: tensorlayer-chinese | Author: shorxp
def data_to_tfrecord(images, labels, filename):
    """ Save data into TFRecord """
    if os.path.isfile(filename):
        print("%s exists" % filename)
        return
    print("Converting data into %s ..." % filename)
    cwd = os.getcwd()
    writer = tf.python_io.TFRecordWriter(filename)
    for index, img in enumerate(images):
        img_raw = img.tobytes()
        ## Visualize a image
        # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
        label = int(labels[index])
        # print(label)
        ## Convert the bytes back to image as follow:
        # image = Image.frombytes('RGB', (32, 32), img_raw)
        # image = np.fromstring(img_raw, np.float32)
        # image = image.reshape([32, 32, 3])
        # tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        }))
        writer.write(example.SerializeToString())  # Serialize To String
    writer.close()
Project: teleport | Author: eomsoft
def grab(bbox=None):
    if sys.platform == "darwin":
        fh, filepath = tempfile.mkstemp('.png')
        os.close(fh)
        subprocess.call(['screencapture', '-x', filepath])
        im = Image.open(filepath)
        im.load()
        os.unlink(filepath)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
Project: teleport | Author: eomsoft
def grab(bbox=None):
    if sys.platform == "darwin":
        fh, filepath = tempfile.mkstemp('.png')
        os.close(fh)
        subprocess.call(['screencapture', '-x', filepath])
        im = Image.open(filepath)
        im.load()
        os.unlink(filepath)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
Project: alfred-image-utilities | Author: danielecook
def grab(bbox=None):
    if sys.platform == "darwin":
        fh, filepath = tempfile.mkstemp('.png')
        os.close(fh)
        subprocess.call(['screencapture', '-x', filepath])
        im = Image.open(filepath)
        im.load()
        os.unlink(filepath)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origin lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
Project: tensormsa_old | Author: TensorMSA
def resize_bytes_image(self, data, conf, net_info):
    """
    :param data: data object contain image data
    :param conf: data manipulation conf data
    :return:
    """
    byte_obj = data['bt']
    ext = str(data['decoder'], 'utf-8')
    width = str(data['width'], 'utf-8')
    height = str(data['height'], 'utf-8')
    table = net_info['table']
    byte_arr = bytearray(byte_obj)
    # mode : F , L , P , 1 , I, RGB, RGBA
    im = Image.frombytes('L', [int(width), int(height)], byte_obj, self.decoder_type(ext))
    tv = list(im.getdata())
    train_arr = [(255 - x) * 1.0 / 255.0 for x in tv]
    return train_arr