[PYTHON] Render and synthesize objects from front, back, left and right

To fill in the blanks in Blender Advent Calendar 2020 I will write an article about the 2.8 compatible version of the previously published script

Render the selected object from front, back, left and right, It is a script that synthesizes into one image, image.png Adjust the camera settings so that the shape selected in the 3D view fits Create an image rendered with front-back, left-right parallel projection and the current camera settings, and create a side-by-side image with the name [Combineed_IMG].

Please note that an error will occur if you do not select a mesh object.

render_around.py


import bpy
import math
import os
import numpy as np
import mathutils
#Fixed render width
render_width = 300


#Rendered image storage path
render_path = bpy.context.scene.render.filepath

def render_func(context):
    #Camera used in the scene
    camera = context.scene.camera
    if len(context.selected_objects) == 0: return
    ###############################
    ###########Save settings###########
    ##Rendering size
    render = context.scene.render
    ref_render_x = render.resolution_x
    ref_render_y = render.resolution_y
    ref_render_percentage =render.resolution_percentage
    ##Camera type (perspective projection,Parallel projection, etc.)
    ref_camera_type = camera.data.type
    ##position
    ref_location = camera.location.copy()
    ref_rotate = camera.rotation_euler.copy()
    ##################################
    
    ##In object mode
    bpy.ops.object.mode_set(mode = 'OBJECT')
    #bpy.ops.object.select_all(action="DESELECT")
    ##################################
    ##Camera settings
    ###Rendering size
    (bbox_width, center_pos) = get_bound_data(context)
    set_render_size(context, bbox_width)
    #Rendered in the initial state
    bpy.ops.render.render()
    #Save
    f_name = "view4.png "
    seve_render(f_name)
    ###Parallel projection
    camera.data.type = 'ORTHO'
    #Set the scale of parallel projection (width is based on BU)
    camera.data.ortho_scale = max(bbox_width)*1.2
    #Camera position(0, -10, 0.3)Rotate(90°, 0, 0)To
    camera_pos =get_camera_pos_top(bbox_width, center_pos)
    camera.location = camera_pos
    camera.rotation_euler = (math.radians(90.0), 0, 0)
    #########################################
    pos = mathutils.Vector((camera_pos))
    mat_rot1 = mathutils.Matrix.Rotation(math.radians(90.0), 4, 'Z')
    mat_trs = mathutils.Matrix.Translation(mathutils.Vector(center_pos))
    mat_rot = mat_trs *mat_rot1 *mat_trs.inverted()
    for i in range(4):
        #rendering
        bpy.ops.render.render()
        #Save
        f_name = "view%s.png " % i
        seve_render(f_name)
        #90 degree rotation
        pos = mat_rot @ pos
        camera.location = pos
        cam_rot = (math.radians(90.0), 0, math.radians(90.0)*(i+1))
        camera.rotation_euler = cam_rot
    ##################################
    ###########Write back settings###########
    ###Rendering size
    render.resolution_x = ref_render_x
    render.resolution_y = ref_render_y
    render.resolution_percentage = ref_render_percentage
    ##Camera type
    camera.data.type = ref_camera_type
    ##position
    camera.location = ref_location
    camera.rotation_euler = ref_rotate
    #########################################
    combine_image(render_path)

#Get drawing range data from the selected shape (center point),width)
def get_bound_data(context):
    objects = context.selected_objects
    bb_point_list = []
    #Get the bbox value of the selected shape in global coordinates
    for obj in objects:
        if obj.type != 'MESH':continue
        bbox_list = [mathutils.Vector(v[:]) for v in obj.bound_box]
        mat = obj.matrix_world
        bb_point_list += [mat@v for v in bbox_list]
    #Get range
    bbox_width = []
    center_pos = []
    for i in range(3):
        min_i = min(bb_point_list, key = (lambda x: x[i]))[i]
        max_i = max(bb_point_list, key = (lambda x: x[i]))[i]
        bbox_width.append( max_i - min_i )
        center_pos.append( (max_i + min_i)/2 )
    return(bbox_width, center_pos)
#Render size setting (fixed width)
def set_render_size(context, bbox_width):
    render = context.scene.render
    #print(bbox_width)
    #Get vertical length
    render_height = int(render_width*(bbox_width[2]/max(bbox_width[:2])))
    if render_height < render_width:  render_height = render_width
    render.resolution_x = render_width
    render.resolution_y = render_height
    render.resolution_percentage = 100
#Set camera position for front image rendering
def get_camera_pos_top(bbox_width, center_pos):
    distance = max(bbox_width)
    return(center_pos[0], center_pos[1]-distance,center_pos[2])
#Save image
def seve_render(f_name):
    img_path = os.path.join(render_path,f_name)
    bpy.data.images['Render Result'].save_render(filepath=img_path)

#Loading images
def load_tex(f_path):
    img = bpy.data.images.load(f_path)
    return(img)

#Convert rgba image to nparray
def img_to_nparray(img):
    bit_len = len(img.pixels)
    (width,height) = img.size
    channels = img.channels #Number of colors
    #Create numpy array
    pixlist = np.array(img.pixels)
    pixlist = pixlist.reshape( height, width, 4)
    return( pixlist )
    
#Join process
def combine_image(render_path):
    width = 0
    height = 0
    image_list = []
    for i in range(5):
        f_name = "view%s.png " % i
        img_path = os.path.join(render_path,f_name)
        img = load_tex(img_path)
        image_list.append(img)
        width += img.size[0]
        height = max(height, img.size[1])
    combine_img_np = np.zeros((height, width, 4))
    offset = 0
    for img in image_list:
        #Convert the read image into a numpy array
        np_array = img_to_nparray(img)
        (height, width, deps) = np_array.shape
        #Join process
        combine_img_np[0:height, offset:offset + width] = np_array
        combine_img_np[:,offset] = np.ones(4)
        offset += width
    #Create a Blender image data object from a numpy array
    img_name = 'Combineed_IMG'
    (height, width, deps) = combine_img_np.shape
    image_object = bpy.data.images.new(name=img_name, width=width, height=height)
    image_object.pixels = list(combine_img_np.flatten())
    for img in image_list:
        bpy.data.images.remove(img)
    
render_func(bpy.context)

The previous version is compatible with 2.8 or later and has some bug fixes.

Recommended Posts

Render and synthesize objects from front, back, left and right
About left justification and right justification of Kivy Label
Use AppSync on the front and back ends