
Created: January 9, 2026

Answered using GPT-5.2 Thinking by Chat01

Question

666.py

import itertools
from Crypto.Util.number import *
import time
from sage.all import *
import random
from itertools import product
from math import prod
from re import findall
from subprocess import check_output

DEBUG_ROOTS = None

def flatter(M, delta):
    # compile https://github.com/keeganryan/flatter and put it in $PATH
    z = "[[" + "]\n[".join(" ".join(map(str, row)) for row in M) + "]]"
    ret = check_output(["flatter"], input=z.encode())
    return matrix(M.nrows(), M.ncols(), map(int, findall(rb"-?\d+", ret)))

def create_lattice(pr, shifts, bounds, order="invlex", sort_shifts_reverse=False, sort_monomials_reverse=False):
    """
    Creates a lattice from a list of shift polynomials.
    :param pr: the polynomial ring
    :param shifts: the shifts
    :param bounds: the bounds
    :param order: the order to sort the shifts/monomials by
    :param sort_shifts_reverse: set to true to sort the shifts in reverse order
    :param sort_monomials_reverse: set to true to sort the monomials in reverse order
    :return: a tuple of lattice and list of monomials
    """
    print(f"Creating a lattice with {len(shifts)} shifts ({order = }, {sort_shifts_reverse = }, {sort_monomials_reverse = })...")
    if pr.ngens() > 1:
        pr_ = pr.change_ring(ZZ, order=order)
        shifts = [pr_(shift) for shift in shifts]

    monomials = set()
    for shift in shifts:
        monomials.update(shift.monomials())

    shifts.sort(reverse=sort_shifts_reverse)
    monomials = sorted(monomials, reverse=sort_monomials_reverse)
    L = matrix(ZZ, len(shifts), len(monomials))
    for row, shift in enumerate(shifts):
        for col, monomial in enumerate(monomials):
            L[row, col] = shift.monomial_coefficient(monomial) * monomial(*bounds)

    monomials = [pr(monomial) for monomial in monomials]
    return L, monomials

def reduce_lattice(L, delta=0.9):
    """
    Reduces a lattice basis using a lattice reduction algorithm (currently flatter).
    :param L: the lattice basis
    :param delta: the delta parameter for LLL (default: 0.9)
    :return: the reduced basis
    """
    print(f"Reducing a {L.nrows()} x {L.ncols()} lattice...")
    return flatter(L, delta)

def reconstruct_polynomials(B, f, modulus, monomials, bounds, preprocess_polynomial=lambda x: x, divide_gcd=True):
    """
    Reconstructs polynomials from the lattice basis in the monomials.
    :param B: the lattice basis
    :param f: the original polynomial (if set to None, polynomials will not be divided by f if possible)
    :param modulus: the original modulus
    :param monomials: the monomials
    :param bounds: the bounds
    :param preprocess_polynomial: a function which preprocesses a polynomial before it is added to the list (default: identity function)
    :param divide_gcd: if set to True, polynomials will be pairwise divided by their gcd if possible (default: True)
    :return: a list of polynomials
    """
    divide_original = f is not None
    modulus_bound = modulus is not None
    print(f"Reconstructing polynomials ({divide_original = }, {modulus_bound = }, {divide_gcd = })...")
    polynomials = []
    for row in range(B.nrows()):
        norm_squared = 0
        w = 0
        polynomial = 0
        for col, monomial in enumerate(monomials):
            if B[row, col] == 0:
                continue
            norm_squared += B[row, col] ** 2
            w += 1
            assert B[row, col] % monomial(*bounds) == 0
            polynomial += B[row, col] * monomial // monomial(*bounds)

        # Equivalent to norm >= modulus / sqrt(w)
        if modulus_bound and norm_squared * w >= modulus ** 2:
            print(f"Row {row} is too large, ignoring...")
            continue

        polynomial = preprocess_polynomial(polynomial)

        if divide_original and polynomial % f == 0:
            print(f"Original polynomial divides reconstructed polynomial at row {row}, dividing...")
            polynomial //= f

        if divide_gcd:
            for i in range(len(polynomials)):
                g = gcd(polynomial, polynomials[i])
                # TODO: why are we only allowed to divide out g if it is constant?
                if g != 1 and g.is_constant():
                    print(f"Reconstructed polynomial has gcd {g} with polynomial at {i}, dividing...")
                    polynomial //= g
                    polynomials[i] //= g

        if polynomial.is_constant():
            print(f"Polynomial at row {row} is constant, ignoring...")
            continue

        if DEBUG_ROOTS is not None:
            print(f"Polynomial at row {row} roots check: {polynomial(*DEBUG_ROOTS)}")

        polynomials.append(polynomial)

    print(f"Reconstructed {len(polynomials)} polynomials")
    return polynomials

def find_roots_univariate(x, polynomial):
    """
    Returns a generator generating all roots of a univariate polynomial in an unknown.
    :param x: the unknown
    :param polynomial: the polynomial
    :return: a generator generating dicts of (x: root) entries
    """
    if polynomial.is_constant():
        return

    for root in polynomial.roots(multiplicities=False):
        if root != 0:
            yield {x: int(root)}

def find_roots_gcd(pr, polynomials):
    """
    Returns a generator generating all roots of a polynomial in some unknowns.
    Uses pairwise gcds to find trivial roots.
    :param pr: the polynomial ring
    :param polynomials: the reconstructed polynomials
    :return: a generator generating dicts of (x0: x0root, x1: x1root, ...) entries
    """
    if pr.ngens() != 2:
        return

    print("Computing pairwise gcds to find trivial roots...")
    x, y = pr.gens()
    for i in range(len(polynomials)):
        for j in range(i):
            g = gcd(polynomials[i], polynomials[j])
            if g.degree() == 1 and g.nvariables() == 2 and g.constant_coefficient() == 0:
                # g = ax + by
                a = int(g.monomial_coefficient(x))
                b = int(g.monomial_coefficient(y))
                yield {x: b, y: a}
                yield {x: -b, y: a}

def find_roots_groebner(pr, polynomials):
    """
    Returns a generator generating all roots of a polynomial in some unknowns.
    Uses Groebner bases to find the roots.
    :param pr: the polynomial ring
    :param polynomials: the reconstructed polynomials
    :return: a generator generating dicts of (x0: x0root, x1: x1root, ...) entries
    """
    # We need to change the ring to QQ because groebner_basis is much faster over a field.
    # We also need to change the term order to lexicographic to allow for elimination.
    gens = pr.gens()
    s = Sequence(polynomials, pr.change_ring(QQ, order="lex"))
    while len(s) > 0:
        G = s.groebner_basis()
        print(f"Sequence length: {len(s)}, Groebner basis length: {len(G)}")
        if len(G) == len(gens):
            print(f"Found Groebner basis with length {len(gens)}, trying to find roots...")
            roots = {}
            for polynomial in G:
                vars = polynomial.variables()
                if len(vars) == 1:
                    for root in find_roots_univariate(vars[0], polynomial.univariate_polynomial()):
                        roots |= root

            if len(roots) == pr.ngens():
                yield roots
                return

            print(f"System is underdetermined, trying to find constant root...")
            G = Sequence(s, pr.change_ring(ZZ, order="lex")).groebner_basis()
            vars = tuple(map(lambda x: var(x), gens))
            for solution_dict in solve([polynomial(*vars) for polynomial in G], vars, solution_dict=True):
                print(solution_dict)
                found = False
                roots = {}
                for i, v in enumerate(vars):
                    s = solution_dict[v]
                    if s.is_constant():
                        if not s.is_zero():
                            found = True
                        roots[gens[i]] = int(s) if s.is_integer() else int(s) + 1
                    else:
                        roots[gens[i]] = 0

                if found:
                    yield roots
                    return

            return
        else:
            # Remove last element (the biggest vector) and try again.
            s.pop()

def find_roots_resultants(gens, polynomials):
    """
    Returns a generator generating all roots of a polynomial in some unknowns.
    Recursively computes resultants to find the roots.
    :param polynomials: the reconstructed polynomials
    :param gens: the unknowns
    :return: a generator generating dicts of (x0: x0root, x1: x1root, ...) entries
    """
    if len(polynomials) == 0:
        return

    if len(gens) == 1:
        if polynomials[0].is_univariate():
            yield from find_roots_univariate(gens[0], polynomials[0].univariate_polynomial())
    else:
        resultants = [polynomials[0].resultant(polynomials[i], gens[0]) for i in range(1, len(gens))]
        for roots in find_roots_resultants(gens[1:], resultants):
            for polynomial in polynomials:
                polynomial = polynomial.subs(roots)
                if polynomial.is_univariate():
                    for root in find_roots_univariate(gens[0], polynomial.univariate_polynomial()):
                        yield roots | root

def find_roots_variety(pr, polynomials):
    """
    Returns a generator generating all roots of a polynomial in some unknowns.
    Uses the Sage variety (triangular decomposition) method to find the roots.
    :param pr: the polynomial ring
    :param polynomials: the reconstructed polynomials
    :return: a generator generating dicts of (x0: x0root, x1: x1root, ...) entries
    """
    # We need to change the ring to QQ because variety requires a field.
    s = Sequence([], pr.change_ring(QQ))
    for polynomial in polynomials:
        s.append(polynomial)
        I = s.ideal()
        dim = I.dimension()
        print(f"Sequence length: {len(s)}, Ideal dimension: {dim}")
        if dim == -1:
            s.pop()
        elif dim == 0:
            print("Found ideal with dimension 0, computing variety...")
            for roots in I.variety(ring=ZZ):
                yield {k: int(v) for k, v in roots.items()}

            return

def solve_root_jacobian_newton_internal(pollst, startpnt, **kwds):
    if 'maxiternum' not in kwds:
        kwds['maxiternum'] = 1024
    maxiternum = kwds['maxiternum']

    pollst_Q = Sequence(pollst, pollst[0].parent().change_ring(QQ))
    vars_pol = pollst_Q[0].parent().gens()
    jac = jacobian(pollst_Q, vars_pol)
    if all([ele == 0 for ele in startpnt]):
        prepnt = {vars_pol[i]: 1 for i in range(len(vars_pol))}
    else:
        prepnt = {vars_pol[i]: 0 for i in range(len(vars_pol))}
    pnt = {vars_pol[i]: startpnt[i] for i in range(len(vars_pol))}
    iternum = 0
    while True:
        if iternum >= maxiternum:
            print("Newton: maxiternum reached, giving up.")
            return None
        evalpollst = [pollst_Q[i].subs(pnt) for i in range(len(pollst_Q))]
        if all([int(ele) == 0 for ele in evalpollst]):
            break
        jac_eval = jac.subs(pnt)
        evalpolvec = vector(QQ, len(evalpollst), evalpollst)
        try:
            pnt_diff_vec = jac_eval.solve_right(evalpolvec)
        except:
            print("Newton: jac.solve_right failed.")
            return None
        prepnt = {k: v for k, v in pnt.items()}
        pnt = {vars_pol[i]: round(QQ(pnt[vars_pol[i]] - pnt_diff_vec[i])) for i in range(len(pollst_Q))}
        if all([prepnt[vars_pol[i]] == pnt[vars_pol[i]] for i in range(len(vars_pol))]):
            print("Newton: point update failed (converged local sol).")
            return None
        prepnt = {k: v for k, v in pnt.items()}
        iternum += 1
    return [int(pnt[vars_pol[i]]) for i in range(len(vars_pol))]

def solve_root_jacobian_newton(pollst, bounds, **kwds):
    st = time.time()
    pollst_local = pollst[:]
    vars_pol = pollst[0].parent().gens()

    if len(vars_pol) > len(pollst):
        return []

    # default parameters
    # kwds.setdefault('select_subpollst_loopnum', 10)
    kwds.setdefault('search_near_positive_bounds_only', False)
    # kwds.setdefault('filter_small_solution_minbound', 2**16)

    for _ in range(kwds['select_subpollst_loopnum']):
        random.shuffle(pollst_local)
        signsiter = [[1] * len(vars_pol)] if kwds['search_near_positive_bounds_only'] else itertools.product([1, -1], repeat=len(vars_pol))
        for signs in signsiter:
            startpnt = [signs[i] * bounds[i] for i in range(len(vars_pol))]
            result = solve_root_jacobian_newton_internal(pollst_local[:len(vars_pol)], startpnt, **kwds)
            if result is not None:
                if all(abs(ele) < kwds['filter_small_solution_minbound'] for ele in result):
                    continue
                ed = time.time()
                print(f"Newton: found root in {ed - st} seconds")
                return [result]
    return []

def find_roots_newton(pr, polynomials, bounds, **kwds):
    """
    Uses Newton-Jacobian method to find integer roots.
    :param pr: polynomial ring
    :param polynomials: reconstructed polynomials
    :param bounds: bounds for each variable (list of ints)
    :param kwds: passed directly to solve_root_jacobian_newton
    :yield: dict of {var: root}
    """
    if not polynomials:
        return
    vars_pol = pr.gens()
    roots = solve_root_jacobian_newton(polynomials, bounds, **kwds)
    for root in roots:
        yield {vars_pol[i]: root[i] for i in range(len(vars_pol))}

def find_roots(pr, polynomials, method="groebner"):
    """
    Returns a generator generating all roots of a polynomial in some unknowns.
    The method used depends on the method parameter.
    :param pr: the polynomial ring
    :param polynomials: the reconstructed polynomials
    :param method: the method to use, can be "groebner", "resultants", "variety", or "newton" (default: "groebner")
    :return: a generator generating dicts of (x0: x0root, x1: x1root, ...) entries
    """
    if pr.ngens() == 1:
        print("Using univariate polynomial to find roots...")
        for polynomial in polynomials:
            yield from find_roots_univariate(pr.gen(), polynomial)
    else:
        # Always try this method because it can find roots the others can't.
        yield from find_roots_gcd(pr, polynomials)

        if method == "groebner":
            print("Using Groebner basis method to find roots...")
            yield from find_roots_groebner(pr, polynomials)
        elif method == "resultants":
            print("Using resultants method to find roots...")
            yield from find_roots_resultants(pr.gens(), polynomials)
        elif method == "variety":
            print("Using variety method to find roots...")
            yield from find_roots_variety(pr, polynomials)
        elif method == "newton":
            print("Using Newton-Jacobian method to find roots...")
            yield from find_roots_newton(pr, polynomials, [X[i] for i in range(pr.ngens())],
                                         maxiternum=2048, select_subpollst_loopnum=20,
                                         filter_small_solution_minbound=2**20)

def integer_multivariate(F, e, m, X, roots_method="groebner"):
    """
    Computes small integer roots of a list of polynomials.
    More information: Aono Y., "Minkowski sum based lattice construction for multivariate simultaneous Coppersmith's technique and applications to RSA" (Section 4)
    :param F: the list of polynomials
    :param e: the list of e values
    :param m: the parameter m
    :param X: an approximate bound on the x roots
    :param roots_method: the method to use to find roots (default: "groebner")
    :return: a generator generating small roots (dicts of (x0: x0root, x1: x1root, ..., y: yroot) entries) of the polynomials
    """
    # We need lexicographic ordering for .lc() below.
    pr = F[0].parent().change_ring(ZZ, order="lex")
    x = pr.gens()

    l = len(e)
    for k in range(l):
        F[k] = pr(F[k])

    print("Generating shifts...")
    g = []
    for k in range(l):
        gk = {}
        for i in range(m + 1):
            for j in range(i + 1):
                gk[i, j] = x[k] ** (i - j) * F[k] ** j * e[k] ** (m - j)
        g.append(gk)

    Ig = {}
    for tup in product(*g):
        g_ = prod(g[k][tup[k]] for k in range(l))
        index = tuple(g_.exponents()[0])
        if index not in Ig:
            Ig[index] = []
        Ig[index].append(g_)

    shifts = []
    for g in Ig.values():
        gp = g[0]
        lc = gp.lc()
        for gi in g[1:]:
            lc, s, t = xgcd(lc, gi.lc())
            gp = s * gp + t * gi
        shifts.append(gp)

    L, monomials = create_lattice(pr, shifts, X)
    L = reduce_lattice(L)
    polynomials = reconstruct_polynomials(L, None, prod(e) ** m, monomials, X)
    return find_roots(pr, polynomials, method=roots_method)

N = Integer(153098639535230461427067981471925418840838598338005180035334795636906122233147195601425250354397215037964687813767036136951747775082105669481477681815877751367786247871862319200300034505895599830252899234816623479894529037827470533297869568936005422622937802442126007732338221711447957108942595934348420152967)
e1 = Integer(26675174225063019792412591801113799130868333007861486357720578798352030914999710926292644671603600643369845213171396954004158372043593223603274138224817810197939227716991483870437629436082594926630937775718564690554680519001757082557490487364436474044434348412437933432480883175650430050712391958500932343901)
e2 = Integer(43950877784211432364704383093702059665468498126670135130662770287054349774316943441695986309047768423965591753129960761705976613430837039441242214744782506036708881166308335886272786409715558332802625980679432726451306523481798238346507261619084743433957076290727858148266709423500224748925763767341340730807)
PR = PolynomialRing(ZZ, 'x1,x2,y')
x1,x2,y = PR.gens()
f1 = x1*(y + N) + 1
f2 = x2*(y + N) + 1
F = [f1, f2]
e = [Integer(e1), Integer(e2)]
X = [2**440, 2**440, 3*Integer(N**0.5)]
ntime = time.time()
roots = integer_multivariate(F, e, 7, X, roots_method="newton")
for root in roots:
    print(root)
    x1, x2, y = root[x1], root[x2], root[y]
"""
x1 = 425056759656315399797342966644832790449245965467229410652699919085739262843717808675032432598608708235471078070697911279881385070409
x2 = 753638940351900109017164154685639032346037179337416965910311598407836180623711810373474778430492270070277136284642540002302920095014
y = -24778346350052468148023771004107125434111681532299131289631490280954652440603554594365745756029378303024815711901975602340582270116648094635240160393328207
"""
d1 = (x1*(y + N) + 1)//e1
d2 = (x2*(y + N) + 1)//e2
c = Integer(93877717323267388927595086106164695425439307573037159369718380924127343418544815509333679743420188507149532789861080535086578879105173477672709484480809535461054572173818555600022970592546859742598747214025487041690467952494343056536435494716815059600034890617984024099540213482984895312025775728869974483561)
print(long_to_bytes(int(pow(c, d1*d2, N))))
print(f"time: {time.time()-ntime}s")
This code takes a different amount of time on every run, sometimes 20 minutes and sometimes more than 50 minutes. What causes this? Why does the same code sometimes run for a long time and sometimes for a short time?

Thinking

Explaining runtime variation factors

The user is asking why the runtime varies between roughly 20 and 50 minutes. The factors include randomness in operations like random.shuffle and itertools.product, and the Newton method's dependence on initial conditions, which can lead to a variable number of iterations. Additionally, lattice reduction's performance may be influenced by floating-point accuracy, system threads, and memory management, all of which contribute to the overall runtime.

Identifying factors affecting runtime differences

The LLL/flatter algorithm might show non-deterministic behavior due to floating-point precision, parallelization, or early termination. Python big integers and Sage with GMP are deterministic when given the same inputs. System state like CPU load, thermal throttling, or memory pressure can also affect runtime. We should cite the uploaded file.

Making runtime more consistent

To reduce runtime variation, set random.seed and adjust the Newton method parameters. Use deterministic root-finding methods like Gröbner bases, or Sage's LLL with one thread. Disable turbo, pin the CPU, and use stable hardware. For the code itself, make solve_root_jacobian_newton deterministic and cap its iterations.

Addressing non-determinism and proposing fixes

The reconstruct_polynomials function filters rows against the modulus bound, but the number of polynomials kept may vary depending on the LLL result, affecting Newton's success. To reduce this randomness, I suggest setting random.seed(0) and removing or seeding the random.shuffle call. Also, adjusting thread settings can help manage variability in LLL's output.

Explaining runtime variance and reducing it

The user is asking why the runtime varies; the size of the differences suggests the algorithmic path taken has a large impact.

Here’s an outline of the factors contributing to the variance:

  1. Randomness: Random shuffle in the method leads to different polynomial subsets and starting points, affecting convergence.
  2. Newton Method: Convergence depends on start points, with retries and loops increasing variability.
  3. LLL/Flatter Reduction: Heuristic differences and multi-threading can alter results.
  4. External Factors: CPU load, thermal throttling, and memory can slow performance when under load.

To reduce the variance:

  • Fix the random seeds: call random.seed(0) and set_random_seed(0) for consistency.

Thought for 48s

Answer

Your script really does contain factors that make each run take a different path even on identical input, so a gap between 20 minutes and 50+ minutes is entirely normal. The main cause is usually not "computer voodoo" but the combination of random/heuristic branches inside the algorithm itself plus the external program and system load on top. (666.py)

1) The code contains explicit randomness: each run takes different branches

In solve_root_jacobian_newton(), every round does the following:

  • random.shuffle(pollst_local): randomly shuffles the list of polynomials (666.py)
  • then tries different starting points for every sign combination from itertools.product([1, -1], repeat=len(vars_pol)) (666.py)

This directly means:

  • the subset/order of equations picked for the Newton iteration differs
  • the starting point differs
  • so sometimes it converges quickly to the integer root, and sometimes it never converges or lands on a "bad local point", in which case all 20 loops (select_subpollst_loopnum=20) × 8 sign combinations (3 variables → 2³ = 8) get exhausted, i.e. up to 160 separate Newton attempts, and the runtime naturally explodes. (666.py)

Algorithms of this "random restart / random perturbation" kind inherently show large runtime swings: a run that hits the solution stops early, a run that does not keeps grinding for a long time.
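A quick way to see this effect in your own runs is to count the restarts. A minimal sketch (not part of 666.py; the wrapper name and print format are my own):

python
import time

def count_attempts(fn):
    """Wrap the per-restart solver so every call (one Newton restart)
    is counted and timed. Purely diagnostic; does not change the result."""
    def wrapped(*args, **kwargs):
        t0 = time.time()
        result = fn(*args, **kwargs)
        dt = time.time() - t0
        wrapped.calls += 1
        wrapped.total += dt
        print(f"Newton attempt #{wrapped.calls}: {dt:.1f}s, "
              f"success={result is not None}, cumulative={wrapped.total:.1f}s")
        return result
    wrapped.calls = 0
    wrapped.total = 0.0
    return wrapped

# Hypothetical usage in 666.py, after the function definitions:
# solve_root_jacobian_newton_internal = count_attempts(solve_root_jacobian_newton_internal)

Comparing a fast run and a slow run here will show directly whether the slow one simply burned through far more restarts.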

2) Newton-Jacobian itself is highly unstable in whether it converges quickly

In solve_root_jacobian_newton_internal(), every step has to:

  • compute the Jacobian
  • solve a linear system via jac_eval.solve_right(...)
  • then update the point and round it to integers (666.py)

Newton's method has a few characteristic behaviors:

  • It is very sensitive to the starting point: close to the true root → fast; far away or near a singular point → it may oscillate, diverge, or get stuck.
  • The Jacobian can be ill-conditioned or singular: here a failed solve_right immediately returns None and the next combination is tried, which adds the cost of many failed attempts. (666.py)
  • You allow up to maxiternum=2048 iterations, so once an attempt enters a slow-convergence regime, that single attempt can already be very expensive. (666.py)

3) LLL / lattice reduction (flatter) is a heuristic, numerics-heavy pipeline; a slightly different result gets amplified into a very different root-finding problem

Your lattice reduction calls the external flatter program: check_output(["flatter"], input=z.encode()) (666.py)

Typical sources of variation in this kind of LLL / approximate reduction:

  • It may use floating-point / approximate Gram–Schmidt, so branch decisions depend on tiny numerical differences (different CPU instructions, thread scheduling, or library versions can all make the reduced basis slightly different).
  • As soon as the reduced basis differs slightly, the "row too large" filter in reconstruct_polynomials() keeps or drops different polynomials:
    if norm_squared * w >= modulus**2: ignore (666.py)
    which makes the system of equations that finally reaches Newton completely different in difficulty, hence the huge time differences.
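If you want to rule flatter out as the source of variation, one option (also hinted at in the notes above: "Sage's LLL with one thread") is to do the reduction with Sage's built-in LLL. A minimal drop-in sketch, assuming you accept a slower but reproducible reduction; the function name is mine:

python
def reduce_lattice_sage(L, delta=0.99):
    """Alternative to reduce_lattice() that uses Sage's built-in LLL instead
    of the external flatter binary: slower on large lattices, but the reduced
    basis is reproducible for a given input and Sage version."""
    print(f"Reducing a {L.nrows()} x {L.ncols()} lattice with Sage's LLL...")
    return L.LLL(delta=delta)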

4) Machine and environment factors amplify this further: CPU / memory / threads / caches

Even with a fixed algorithmic path, the following can turn 20 minutes into 50:

  • whether the CPU is busy with other tasks, or downclocking / thermal throttling
  • whether memory pressure causes swapping (large LLL matrices are very hungry for memory bandwidth)
  • whether flatter / Sage / the underlying linear-algebra libraries run multithreaded: different thread scheduling → visible runtime jitter
  • caching effects from loading Sage and its libraries on the first run (usually only a small slice, but it adds up)

How to make the runtime more controllable / reproducible

Here are the most effective measures (starting with the ones most relevant to your code):

A. Fix the randomness (most important)

At the very top of the script (before any shuffle), add:

python
import random
random.seed(0)

# In case Sage's own randomness is also involved (some algorithms/heuristics may use it)
from sage.all import set_random_seed
set_random_seed(0)

This will at least make the random path of the Newton part much more stable. (666.py)

B. Remove the shuffle entirely (make the path fully deterministic)

Comment out random.shuffle(pollst_local) or replace it with a fixed ordering (if the polynomials can be sorted):

python
# random.shuffle(pollst_local)

This gives up a bit of the "get lucky and hit a good path" chance, but it noticeably reduces the "fast this time, slow next time" behavior.
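If you prefer a fixed ordering over simply commenting the shuffle out, any deterministic key works. A minimal sketch; the helper name and sort key are illustrative, not part of 666.py:

python
def deterministic_order(polys):
    """Return the polynomials in a fixed, reproducible order; the key
    (total degree, number of monomials, string form) is an arbitrary
    illustrative choice, any stable key will do."""
    return sorted(polys, key=lambda p: (p.degree(), len(p.monomials()), str(p)))

# Hypothetical replacement for the shuffle inside solve_root_jacobian_newton:
# pollst_local = deterministic_order(pollst_local)  # instead of random.shuffle(pollst_local)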

C. Limit the thread count of external tools / linear-algebra libraries (reduce jitter)

Set environment variables before running (bash example):

bash
export OMP_NUM_THREADS=1
export MKL_NUM_THREADS=1
export OPENBLAS_NUM_THREADS=1

Many multithreaded linear-algebra / reduction implementations show larger runtime variance; pinning them to a single thread is usually more stable (though not necessarily faster).

D. Instrument each stage to see where the time actually goes

You can print time.time() before and after reduce_lattice, reconstruct_polynomials and find_roots_newton to determine whether:

  • LLL/flatter is slow, or
  • the number of Newton attempts/iterations blew up

That way you can treat the actual cause (e.g. if Newton fails too often, tune the starting-point strategy/parameters instead of agonizing over LLL).
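A minimal sketch of such instrumentation, assuming you wrap the calls inside integer_multivariate() yourself; the timed helper and stage labels are mine:

python
import time
from contextlib import contextmanager

@contextmanager
def timed(stage):
    """Print how long the wrapped block took."""
    t0 = time.time()
    try:
        yield
    finally:
        print(f"[timing] {stage}: {time.time() - t0:.1f}s")

# Hypothetical usage inside integer_multivariate():
# with timed("reduce_lattice (flatter)"):
#     L = reduce_lattice(L)
# with timed("reconstruct_polynomials"):
#     polynomials = reconstruct_polynomials(L, None, prod(e) ** m, monomials, X)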


If you are willing to paste the output logs of one "20-minute" run and one "50-minute" run (especially the Reducing a ... lattice..., Reconstructed ... polynomials and Newton: ... lines), I can help you pinpoint whether the difference comes from the LLL/reconstruction stage producing a harder system downstream, or from Newton's random restarts blowing up the number of attempts.
