EXTRACT THIS IMAGE OF THE NAZARENO AS A 4K PNG, IN THIS SIDE VIEW

Created on: January 1, 2026

Answered using GPT-5.2 Thinking by Chat01

Question

capa cortass.PNG

EXTRACT THIS IMAGE OF THE NAZARENO AS A 4K PNG, IN THIS SIDE VIEW

Python

python
from PIL import Image, ImageOps, ImageFilter

img_path = "/mnt/data/capa cortass.PNG"
img = Image.open(img_path)
img.mode, img.size

('RGBA', (304, 411))

Python

python
import numpy as np, PIL

arr = np.array(img)
arr.shape, arr[:, :, 3].min(), arr[:, :, 3].max(), np.unique(arr[:, :, 3])[:10], len(np.unique(arr[:, :, 3]))

((411, 304, 4), 255, 255, array([255], dtype=uint8), 1)

Python

python
corners = [arr[0, 0, :3], arr[0, -1, :3], arr[-1, 0, :3], arr[-1, -1, :3]]
corners

[array([231, 231, 231], dtype=uint8), array([231, 231, 231], dtype=uint8), array([231, 231, 231], dtype=uint8), array([231, 231, 231], dtype=uint8)]

Python

python
bg = np.array([231, 231, 231], dtype=np.int16)
rgb = arr[:, :, :3].astype(np.int16)
dist = np.sqrt(((rgb - bg) ** 2).sum(axis=2))
dist.min(), dist.max(), np.percentile(dist, [0, 1, 5, 10, 50, 90, 95, 99, 100])

(0.0, 256.34351952019387, array([ 0. , 0. , 0. , 0. , 0. , 41.56921938, 86.60254038, 159.3486743 , 256.34351952]))

Python

python
nonzero = dist[dist > 0]
nonzero.min(), np.percentile(nonzero, [0, 1, 5, 10, 20])

(1.7320508075688772, array([ 1.73205081, 1.73205081, 5.19615242, 8.66025404, 15.58845727]))

Python

python
from PIL import Image, ImageFilter
import numpy as np, os, math

img = Image.open("/mnt/data/capa cortass.PNG").convert("RGBA")
arr = np.array(img).astype(np.float32)
bg = np.array([231, 231, 231], dtype=np.float32)
rgb = arr[:, :, :3]
dist = np.sqrt(((rgb - bg) ** 2).sum(axis=2))
t0, t1 = 1.5, 25.0
alpha = np.clip((dist - t0) / (t1 - t0), 0, 1) * 255.0

# Feather a bit by applying a slight blur to the alpha channel
alpha_img = Image.fromarray(alpha.astype(np.uint8), mode="L").filter(ImageFilter.GaussianBlur(radius=1.2))
alpha = np.array(alpha_img).astype(np.float32)

# Edge color decontamination: solve for the foreground color assuming the background color
a = alpha / 255.0
a3 = np.repeat(a[:, :, None], 3, axis=2)
# avoid division by zero
fg = np.where(a3 > 1e-3, (rgb - (1 - a3) * bg) / np.clip(a3, 1e-3, 1.0), rgb)
fg = np.clip(fg, 0, 255)
out_arr = np.dstack([fg, alpha]).astype(np.uint8)
out = Image.fromarray(out_arr, mode="RGBA")

# crop to the bbox of alpha > 0
mask = alpha_img.point(lambda p: 255 if p > 3 else 0)
bbox = mask.getbbox()
bbox

(77, 108, 218, 362)

Python

python
x0, y0, x1, y1 = bbox
pad = int(0.05 * max(x1 - x0, y1 - y0))
bbox_p = (max(0, x0 - pad), max(0, y0 - pad), min(img.size[0], x1 + pad), min(img.size[1], y1 + pad))
bbox_p, img.size

((65, 96, 230, 374), (304, 411))

Python

python
cropped = out.crop(bbox_p)
cropped.size

(165, 278)

Python

python
from PIL import Image

def place_on_canvas(cropped, canvas_size, margin, bottom_align=True):
    W, H = canvas_size
    target_W = W - 2 * margin
    target_H = H - 2 * margin
    cw, ch = cropped.size
    scale = min(target_W / cw, target_H / ch)
    new_size = (max(1, int(round(cw * scale))), max(1, int(round(ch * scale))))
    resized = cropped.resize(new_size, Image.Resampling.LANCZOS)
    x = (W - new_size[0]) // 2
    if bottom_align:
        y = H - margin - new_size[1]
    else:
        y = (H - new_size[1]) // 2
    canvas = Image.new("RGBA", (W, H), (0, 0, 0, 0))
    canvas.alpha_composite(resized, (x, y))
    return canvas

# The tail of this cell is truncated in the transcript; the lines below are a
# reconstruction inferred from the returned paths/sizes and from the v2 cell further down.
square4k = place_on_canvas(cropped, (4096, 4096), margin=200, bottom_align=True)
uhd4k = place_on_canvas(cropped, (3840, 2160), margin=150, bottom_align=True)
square_path = "/mnt/data/nazareno_lateral_4k_4096.png"
uhd_path = "/mnt/data/nazareno_lateral_4k_3840x2160.png"
square4k.save(square_path)
uhd4k.save(uhd_path)
square_path, uhd_path, os.path.getsize(square_path), os.path.getsize(uhd_path)

('/mnt/data/nazareno_lateral_4k_4096.png', '/mnt/data/nazareno_lateral_4k_3840x2160.png', 2303439, 847468)

Python

python
sq = Image.open(square_path)
np.array(sq)[:, :, 3].min(), np.array(sq)[:, :, 3].max()

(0, 255)

Python

python
from PIL import Image, ImageFilter
import numpy as np, os, math

img = Image.open("/mnt/data/capa cortass.PNG").convert("RGBA")
arr = np.array(img).astype(np.float32)
bg = np.array([231, 231, 231], dtype=np.float32)
rgb = arr[:, :, :3]
dist = np.sqrt(((rgb - bg) ** 2).sum(axis=2))
t0, t1 = 5.0, 35.0
a = np.clip((dist - t0) / (t1 - t0), 0, 1)
a = a ** 1.6
alpha = a * 255.0
alpha_img = Image.fromarray(alpha.astype(np.uint8))
alpha_img = alpha_img.filter(ImageFilter.GaussianBlur(radius=0.8))
alpha_np = np.array(alpha_img).astype(np.float32)
alpha_np[alpha_np < 12] = 0  # cut fringe
alpha_img = Image.fromarray(alpha_np.astype(np.uint8), mode="L")
a = alpha_np / 255.0
a3 = np.repeat(a[:, :, None], 3, axis=2)
# decontam
fg = np.where(a3 > 1e-3, (rgb - (1 - a3) * bg) / np.clip(a3, 1e-3, 1.0), rgb)
fg = np.clip(fg, 0, 255)
out_arr = np.dstack([fg, alpha_np]).astype(np.uint8)
out = Image.fromarray(out_arr, mode="RGBA")
mask = alpha_img.point(lambda p: 255 if p > 3 else 0)
bbox = mask.getbbox()
pad = int(0.05 * max(bbox[2] - bbox[0], bbox[3] - bbox[1]))
bbox_p = (max(0, bbox[0] - pad), max(0, bbox[1] - pad), min(img.size[0], bbox[2] + pad), min(img.size[1], bbox[3] + pad))
cropped = out.crop(bbox_p)
cropped.size, bbox_p

((161, 275), (67, 97, 228, 372))

Python

python
def place_on_canvas(cropped, canvas_size, margin, bottom_align=True):
    W, H = canvas_size
    target_W = W - 2 * margin
    target_H = H - 2 * margin
    cw, ch = cropped.size
    scale = min(target_W / cw, target_H / ch)
    new_size = (max(1, int(round(cw * scale))), max(1, int(round(ch * scale))))
    resized = cropped.resize(new_size, Image.Resampling.LANCZOS)
    x = (W - new_size[0]) // 2
    y = (H - margin - new_size[1]) if bottom_align else (H - new_size[1]) // 2
    canvas = Image.new("RGBA", (W, H), (0, 0, 0, 0))
    canvas.alpha_composite(resized, (x, y))
    return canvas, new_size, (x, y)

square4k2, _sz, _pos = place_on_canvas(cropped, (4096, 4096), margin=200, bottom_align=True)
uhd4k2, _, _ = place_on_canvas(cropped, (3840, 2160), margin=150, bottom_align=True)
square_path2 = "/mnt/data/nazareno_lateral_4k_4096_v2.png"
uhd_path2 = "/mnt/data/nazareno_lateral_4k_3840x2160_v2.png"
square4k2.save(square_path2)
uhd4k2.save(uhd_path2)
square_path2, uhd_path2, _sz, _pos

('/mnt/data/nazareno_lateral_4k_4096_v2.png', '/mnt/data/nazareno_lateral_4k_3840x2160_v2.png', (2164, 3696), (966, 200))

<<ImageDisplayed>>

Python

python
import cv2, numpy as np, PIL

cv2.__version__

'4.11.0'

Python

python
import cv2, numpy as np
from PIL import Image, ImageFilter

# load image
pil_img = Image.open("/mnt/data/capa cortass.PNG").convert("RGBA")
rgb = np.array(pil_img)[:, :, :3]
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
h, w = bgr.shape[:2]

# init mask
mask = np.zeros((h, w), np.uint8)

# rectangle around the object (estimate)
# Use the previous bbox from the first method and expand it
rect = (50, 60, 204, 330)  # x, y, w, h; within the image
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
cv2.grabCut(bgr, mask, rect, bgdModel, fgdModel, 12, cv2.GC_INIT_WITH_RECT)

# foreground mask
fg_mask = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)

# morphological cleanup: close then open
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel, iterations=2)
fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel, iterations=1)
fg_mask.sum(), h * w

(23766, 124944)

Python

python
fg_u8 = fg_mask.astype(np.uint8)
inv_u8 = (1 - fg_u8).astype(np.uint8)

# distance transforms
fg_dist = cv2.distanceTransform(fg_u8, cv2.DIST_L2, 5)
bg_dist = cv2.distanceTransform(inv_u8, cv2.DIST_L2, 5)
alpha = fg_dist / (fg_dist + bg_dist + 1e-6)
alpha = np.clip(alpha, 0, 1)

# sharpen alpha a bit
alpha = alpha ** 0.8
alpha_u8 = (alpha * 255).astype(np.uint8)
alpha_u8.min(), alpha_u8.max(), np.percentile(alpha_u8, [0, 1, 5, 50, 95, 99, 100])

(0, 255, array([ 0., 0., 0., 0., 255., 255., 255.]))

Python

python
from PIL import Image

bg = np.array([231, 231, 231], dtype=np.float32)
rgbf = rgb.astype(np.float32)
a = alpha_u8.astype(np.float32) / 255.0
a3 = np.repeat(a[:, :, None], 3, axis=2)

# decontaminate only the partially transparent pixels to reduce the halo; keep fully opaque pixels unchanged
fg = rgbf.copy()
partial = (a > 0) & (a < 0.999)
partial3 = np.repeat(partial[:, :, None], 3, axis=2)
fg = np.where(partial3, (rgbf - (1 - a3) * bg) / np.clip(a3, 1e-3, 1.0), rgbf)
fg = np.clip(fg, 0, 255)
out_arr = np.dstack([fg, alpha_u8]).astype(np.uint8)
out_pil = Image.fromarray(out_arr, mode="RGBA")

# crop bbox
mask_pil = Image.fromarray(alpha_u8, mode="L")
bbox = mask_pil.getbbox()
bbox

(80, 110, 215, 359)

Python

python
x0, y0, x1, y1 = bbox
pad = int(0.06 * max(x1 - x0, y1 - y0))
bbox_p = (max(0, x0 - pad), max(0, y0 - pad), min(w, x1 + pad), min(h, y1 + pad))
cropped = out_pil.crop(bbox_p)
cropped.size

(163, 277)

Python

python
square4k3, _, _ = place_on_canvas(cropped, (4096, 4096), margin=200, bottom_align=True)
uhd4k3, _, _ = place_on_canvas(cropped, (3840, 2160), margin=150, bottom_align=True)
square_path3 = "/mnt/data/nazareno_lateral_4k_4096_clean.png"
uhd_path3 = "/mnt/data/nazareno_lateral_4k_3840x2160_clean.png"
square4k3.save(square_path3)
uhd4k3.save(uhd_path3)
square_path3, uhd_path3

('/mnt/data/nazareno_lateral_4k_4096_clean.png', '/mnt/data/nazareno_lateral_4k_3840x2160_clean.png')

<<ImageDisplayed>>

Python

python
from PIL import Image, ImageFilter, ImageChops, ImageEnhance
import numpy as np, cv2

# Use the grabCut fg_mask from before
hard_alpha = (fg_mask * 255).astype(np.uint8)

# build RGBA without decontamination, for stability
rgba = np.dstack([rgb.astype(np.uint8), hard_alpha])
obj = Image.fromarray(rgba, mode="RGBA")
mask_pil = Image.fromarray(hard_alpha, mode="L")
bbox = mask_pil.getbbox()
pad = int(0.06 * max(bbox[2] - bbox[0], bbox[3] - bbox[1]))
bbox_p = (max(0, bbox[0] - pad), max(0, bbox[1] - pad), min(w, bbox[2] + pad), min(h, bbox[3] + pad))
cropped_hard = obj.crop(bbox_p)

# place on canvas with resizing
def place_on_canvas_smooth(cropped, canvas_size, margin, bottom_align=True, alpha_blur_radius=1.2):
    W, H = canvas_size
    target_W = W - 2 * margin
    target_H = H - 2 * margin
    cw, ch = cropped.size
    scale = min(target_W / cw, target_H / ch)
    new_size = (max(1, int(round(cw * scale))), max(1, int(round(ch * scale))))
    resized = cropped.resize(new_size, Image.Resampling.LANCZOS)
    # smooth the alpha slightly to reduce jaggies
    r, g, b, a = resized.split()
    a_blur = a.filter(ImageFilter.GaussianBlur(radius=alpha_blur_radius))
    # boost alpha contrast slightly
    a_enh = ImageEnhance.Contrast(a_blur).enhance(1.1)
    resized = Image.merge("RGBA", (r, g, b, a_enh))
    x = (W - new_size[0]) // 2
    y = (H - margin - new_size[1]) if bottom_align else (H - new_size[1]) // 2
    canvas = Image.new("RGBA", (W, H), (0, 0, 0, 0))
    canvas.alpha_composite(resized, (x, y))
    return canvas

square4k4 = place_on_canvas_smooth(cropped_hard, (4096, 4096), margin=200, bottom_align=True, alpha_blur_radius=1.0)
square_path4 = "/mnt/data/nazareno_lateral_4k_4096_smooth.png"
square4k4.save(square_path4)
square_path4

'/mnt/data/nazareno_lateral_4k_4096_smooth.png'

<<ImageDisplayed>>

Python

python
square4k5 = place_on_canvas_smooth(cropped_hard, (4096, 4096), margin=200, bottom_align=True, alpha_blur_radius=2.5)
square_path5 = "/mnt/data/nazareno_lateral_4k_4096_smoother.png"
square4k5.save(square_path5)
square_path5

'/mnt/data/nazareno_lateral_4k_4096_smoother.png'

<<ImageDisplayed>>

Python

python
from PIL import Image

img4096 = Image.open("/mnt/data/nazareno_lateral_4k_4096_clean.png")
thumb = img4096.resize((1024, 1024), Image.Resampling.LANCZOS)
thumb_path = "/mnt/data/preview_1024.png"
thumb.save(thumb_path)
thumb_path

'/mnt/data/preview_1024.png'

<<ImageDisplayed>>

Python

python
# Create a smoother mask via median blur
fg_smooth = cv2.medianBlur(fg_mask * 255, 5)
fg_smooth = (fg_smooth > 127).astype(np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
fg_smooth = cv2.morphologyEx(fg_smooth, cv2.MORPH_CLOSE, kernel, iterations=1)
fg_smooth = cv2.morphologyEx(fg_smooth, cv2.MORPH_OPEN, kernel, iterations=1)
hard_alpha2 = (fg_smooth * 255).astype(np.uint8)

# Build RGBA
rgba2 = np.dstack([rgb.astype(np.uint8), hard_alpha2])
obj2 = Image.fromarray(rgba2, mode="RGBA")
mask_pil2 = Image.fromarray(hard_alpha2, mode="L")
bbox2 = mask_pil2.getbbox()
pad = int(0.06 * max(bbox2[2] - bbox2[0], bbox2[3] - bbox2[1]))
# The rest of this cell is truncated in the transcript; the lines below are a
# reconstruction (variable names are guesses) consistent with the returned paths
# and with the use of cropped2 in the next cell.
bbox_p2 = (max(0, bbox2[0] - pad), max(0, bbox2[1] - pad), min(w, bbox2[2] + pad), min(h, bbox2[3] + pad))
cropped2 = obj2.crop(bbox_p2)
square4k6 = place_on_canvas_smooth(cropped2, (4096, 4096), margin=200, bottom_align=True, alpha_blur_radius=1.2)
med_path = "/mnt/data/nazareno_lateral_4k_4096_med.png"
square4k6.save(med_path)
preview_med = square4k6.resize((1024, 1024), Image.Resampling.LANCZOS)
preview_med_path = "/mnt/data/preview_1024_med.png"
preview_med.save(preview_med_path)
med_path, preview_med_path

('/mnt/data/nazareno_lateral_4k_4096_med.png', '/mnt/data/preview_1024_med.png')

<<ImageDisplayed>>

Python

python
uhd4k6 = place_on_canvas_smooth(cropped2, (3840, 2160), margin=150, bottom_align=True, alpha_blur_radius=1.2)
uhd6_path = "/mnt/data/nazareno_lateral_4k_3840x2160_med.png"
uhd4k6.save(uhd6_path)
uhd6_path

'/mnt/data/nazareno_lateral_4k_3840x2160_med.png'


Thought for 7m 12s

Answer

Done: here is the 4K PNG, keeping this side view (with a transparent background):
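
For reference, here is a minimal, consolidated sketch of the pipeline used in the cells above: key out the flat (231, 231, 231) background by color distance, feather and de-fringe the alpha matte, un-mix the background color from semi-transparent edge pixels, crop to the subject, and place it on a transparent 4096x4096 canvas. The output filename and the keying thresholds are illustrative (they were tuned for this particular scan), so treat this as a sketch rather than a drop-in tool.

python
from PIL import Image, ImageFilter
import numpy as np

SRC = "/mnt/data/capa cortass.PNG"
DST = "nazareno_lateral_4k.png"  # illustrative output name

img = Image.open(SRC).convert("RGBA")
rgb = np.array(img).astype(np.float32)[:, :, :3]

# Key out the flat light-gray background by per-pixel color distance
bg = np.array([231, 231, 231], dtype=np.float32)
dist = np.sqrt(((rgb - bg) ** 2).sum(axis=2))
alpha = np.clip((dist - 5.0) / 30.0, 0, 1) * 255.0  # thresholds are approximate

# Feather the matte slightly, then cut the faint fringe
alpha_img = Image.fromarray(alpha.astype(np.uint8), mode="L").filter(ImageFilter.GaussianBlur(1.0))
alpha = np.array(alpha_img).astype(np.float32)
alpha[alpha < 12] = 0

# Un-mix the background color from semi-transparent edge pixels
a3 = np.repeat((alpha / 255.0)[:, :, None], 3, axis=2)
fg = np.where(a3 > 1e-3, (rgb - (1 - a3) * bg) / np.clip(a3, 1e-3, 1.0), rgb)
cutout = Image.fromarray(np.dstack([np.clip(fg, 0, 255), alpha]).astype(np.uint8), "RGBA")

# Crop to the subject, then scale onto a transparent 4096x4096 canvas
cutout = cutout.crop(Image.fromarray(alpha.astype(np.uint8), mode="L").getbbox())
W = H = 4096
margin = 200
scale = min((W - 2 * margin) / cutout.width, (H - 2 * margin) / cutout.height)
resized = cutout.resize((round(cutout.width * scale), round(cutout.height * scale)), Image.Resampling.LANCZOS)
canvas = Image.new("RGBA", (W, H), (0, 0, 0, 0))
canvas.alpha_composite(resized, ((W - resized.width) // 2, H - margin - resized.height))
canvas.save(DST)

To sanity-check the transparency, composite the saved PNG over a solid-colored RGBA canvas (for example with Image.alpha_composite) and look for leftover gray fringing around the edges.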
