热门角色不仅是灵感来源,更是你的效率助手。通过精挑细选的角色提示词,你可以快速生成高质量内容、提升创作灵感,并找到最契合你需求的解决方案。让创作更轻松,让价值更直接!
我们根据不同用户需求,持续更新角色库,让你总能找到合适的灵感入口。
一键代码体检,结构更清晰、性能更强劲、安全更可靠,让代码整洁易维护!
下面是对现有代码的审查要点与优化方案,涵盖性能、结构、可读性、安全、资源效率与错误处理,并附上重构后的示例代码(db.js、userService.js、routes/users.js)。重点解决 SQL 拼接风险、缓存策略、分页默认值、阻塞 I/O 以及 N+1 查询问题。
问题清单与改进建议
重构后的代码示例
db.js
// db.js
// Opens (or creates) the SQLite database file next to this module and applies
// connection-level PRAGMAs; promise wrappers below are the public API.
const sqlite3 = require('sqlite3').verbose();
const path = require('path');

// NOTE(review): open errors are only logged; callers still receive the handle.
// Confirm this fail-soft policy is intended.
const db = new sqlite3.Database(
  path.resolve(__dirname, 'app.db'),
  sqlite3.OPEN_READWRITE | sqlite3.OPEN_CREATE,
  (err) => {
    if (err) console.error('Failed to open DB', err);
  }
);

// Baseline tuning: WAL raises read concurrency, foreign keys are enforced,
// and busy_timeout avoids immediate SQLITE_BUSY errors under write locks.
db.exec(`
PRAGMA journal_mode = WAL;
PRAGMA foreign_keys = ON;
PRAGMA busy_timeout = 3000;
`);
/** Run a SELECT and resolve with the full array of matching rows. */
function all(sql, params = []) {
  return new Promise((resolve, reject) => {
    db.all(sql, params, (err, rows) => {
      if (err) {
        reject(err);
      } else {
        resolve(rows);
      }
    });
  });
}
/** Run a SELECT and resolve with the first matching row (or undefined). */
function get(sql, params = []) {
  return new Promise((resolve, reject) => {
    db.get(sql, params, (err, row) => {
      if (err) {
        reject(err);
      } else {
        resolve(row);
      }
    });
  });
}
/**
 * Execute an INSERT/UPDATE/DELETE.
 * Resolves with { changes, lastID } taken from the statement context.
 */
function run(sql, params = []) {
  return new Promise((resolve, reject) => {
    // Must stay a classic function expression: sqlite3 exposes
    // `changes` and `lastID` on `this`.
    db.run(sql, params, function (err) {
      if (err) {
        reject(err);
      } else {
        resolve({ changes: this.changes, lastID: this.lastID });
      }
    });
  });
}
/** Close the underlying database handle. */
function close() {
  return new Promise((resolve, reject) => {
    db.close((err) => {
      if (err) {
        reject(err);
        return;
      }
      resolve();
    });
  });
}

module.exports = { db, all, get, run, close };
userService.js
// userService.js
const { all, get } = require('./db');

// Simple TTL cache, capacity-capped to prevent unbounded memory growth.
const MAX_CACHE_ENTRIES = 100;
const DEFAULT_TTL_MS = 60 * 1000; // 1 minute
/**
 * Minimal in-memory cache with a per-entry TTL and a hard size cap.
 * Eviction is FIFO (insertion order), not LRU.
 */
class SimpleCache {
  constructor(max = MAX_CACHE_ENTRIES, ttl = DEFAULT_TTL_MS) {
    this.max = max;
    this.ttl = ttl;
    this.store = new Map();
  }

  /** Insert a value; evicts the oldest entries once the cap is exceeded. */
  set(key, value) {
    this.store.set(key, { value, expires: Date.now() + this.ttl });
    while (this.store.size > this.max) {
      // Map iterates in insertion order, so the first key is the oldest.
      const oldest = this.store.keys().next().value;
      this.store.delete(oldest);
    }
  }

  /** Return the cached value, or undefined when absent or expired. */
  get(key) {
    const entry = this.store.get(key);
    if (entry === undefined) return undefined;
    if (Date.now() > entry.expires) {
      this.store.delete(key);
      return undefined;
    }
    return entry.value;
  }
}
// Module-level singleton cache shared by every getUsers() call.
const cache = new SimpleCache();
/** Normalize a page number: positive finite values are floored, else 1. */
function clampPage(page) {
  if (!Number.isFinite(page) || page <= 0) {
    return 1;
  }
  return Math.floor(page);
}
/** Normalize a page size: defaults to 20, capped at 100 to bound query cost. */
function clampPageSize(size) {
  if (!Number.isFinite(size) || size <= 0) {
    return Math.min(20, 100);
  }
  return Math.min(Math.floor(size), 100);
}
/**
 * Unified paginated user listing with optional name search; roles are
 * batch-loaded in a single IN query to avoid the N+1 pattern.
 *
 * @param {number} page     1-based page number (clamped to >= 1)
 * @param {number} pageSize rows per page (clamped to 1..100)
 * @param {string} [search] optional substring to match against `name`
 * @returns {Promise<{data: object[], total: number, page: number, pageSize: number}>}
 * @throws {Error} when the search term exceeds 100 characters
 */
async function getUsers(page = 1, pageSize = 20, search) {
  page = clampPage(page);
  pageSize = clampPageSize(pageSize);

  const key = `users:${page}:${pageSize}:${search || ''}`;
  const cached = cache.get(key);
  if (cached) return cached;

  const whereParts = [];
  const params = [];
  if (typeof search === 'string' && search.trim().length > 0) {
    const term = search.trim();
    // Cap the term length so a huge LIKE pattern cannot degrade the query.
    if (term.length > 100) {
      throw new Error('Search term too long');
    }
    // FIX: escape LIKE wildcards so user input matches literally instead of
    // acting as pattern syntax (consistent with the Java likePattern helper).
    const escaped = term
      .replace(/\\/g, '\\\\')
      .replace(/%/g, '\\%')
      .replace(/_/g, '\\_');
    whereParts.push("name LIKE ? ESCAPE '\\'");
    params.push(`%${escaped}%`);
  }
  const whereClause = whereParts.length ? `WHERE ${whereParts.join(' AND ')}` : '';
  const offset = (page - 1) * pageSize;

  // Main query: one page of users.
  const users = await all(
    `SELECT id, name, created_at
     FROM users
     ${whereClause}
     ORDER BY created_at DESC
     LIMIT ? OFFSET ?`,
    [...params, pageSize, offset]
  );

  // Total count for client-side pagination.
  const totalRow = await get(
    `SELECT COUNT(*) AS count FROM users ${whereClause}`,
    params
  );
  const total = totalRow?.count || 0;

  // Batch-load roles for the whole page to avoid N+1 queries.
  const userIds = users.map((u) => u.id);
  let rolesByUser = {};
  if (userIds.length > 0) {
    const placeholders = userIds.map(() => '?').join(',');
    const roleRows = await all(
      `SELECT ur.user_id, r.id AS role_id, r.name AS role_name
       FROM roles r
       JOIN user_roles ur ON ur.role_id = r.id
       WHERE ur.user_id IN (${placeholders})`,
      userIds
    );
    rolesByUser = roleRows.reduce((acc, row) => {
      if (!acc[row.user_id]) acc[row.user_id] = [];
      acc[row.user_id].push({ id: row.role_id, name: row.role_name });
      return acc;
    }, {});
  }

  const data = users.map((u) => ({
    ...u,
    roles: rolesByUser[u.id] || [],
  }));

  const result = { data, total, page, pageSize };
  cache.set(key, result);
  return result;
}

module.exports = { getUsers };
routes/users.js
// routes/users.js
// Express router exposing the paginated user listing endpoint.
const express = require('express');
const router = express.Router();
const { getUsers } = require('../userService');
// GET /users — paginated listing; accepts ?page, ?pageSize and ?search.
// Query params are parsed here; range clamping happens inside getUsers.
router.get('/users', async (req, res, next) => {
  try {
    const parsedPage = Number.parseInt(req.query.page, 10);
    const parsedSize = Number.parseInt(req.query.pageSize, 10);
    const page = Number.isFinite(parsedPage) ? parsedPage : 1;
    const pageSize = Number.isFinite(parsedSize) ? parsedSize : 20;

    const result = await getUsers(page, pageSize, req.query.search);
    const { data, total, page: outPage, pageSize: outSize } = result;
    res.json({ data, total, page: outPage, pageSize: outSize });
  } catch (err) {
    // Known validation failure -> 400; anything else -> 500.
    if (err && err.message === 'Search term too long') {
      return res.status(400).json({ error: 'search parameter too long' });
    }
    console.error('[GET /users] error:', err);
    res.status(500).json({ error: 'server error' });
  }
});

module.exports = router;
进一步的最佳实践建议
这些改动能显著提高性能与安全性,减少内存风险,提升代码的可维护性与一致性。如果需要,我可以继续将用户创建、更新等写操作也适配为参数化查询并集成缓存失效逻辑。
下面是针对你给出的代码的全面审查与重构建议,重点覆盖安全性、资源效率、可读性、错误处理和性能优化,并附上可直接替换的改进版代码示例。
主要问题与风险
重构目标
以下为建议的改进版代码(views.py)及相关说明:
# views.py — image upload endpoint (Django + Pillow).
from django.views.decorators.http import require_POST
from django.http import JsonResponse
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
import logging
import os
import re
import time
import uuid
from io import BytesIO
from pathlib import Path
from PIL import Image, ImageOps, UnidentifiedImageError

# FIX: the extracted text read `logging.getLogger(name)` — the standard
# module-level logger uses the dunder `__name__`.
logger = logging.getLogger(__name__)

MAX_UPLOAD_BYTES = 10 * 1024 * 1024  # 10 MB cap; tune per business needs
MAX_DIMENSION = 4096                 # maximum pixels per side
DEFAULT_SIZE = (1024, 1024)
UPLOAD_SUBDIR = 'uploads'            # stored under MEDIA_ROOT/uploads
Image.MAX_IMAGE_PIXELS = 20_000_000  # decompression-bomb guard (20 MP)

size_pattern = re.compile(r'^\s*(\d{1,5})x(\d{1,5})\s*$')
def parse_size(raw: str | None) -> tuple[int, int]:
    """Parse a 'WxH' size string into a (width, height) tuple.

    Returns DEFAULT_SIZE when `raw` is empty/None.
    Raises ValueError for malformed input or dimensions outside
    (0, MAX_DIMENSION].
    """
    if not raw:
        return DEFAULT_SIZE
    m = size_pattern.match(raw)
    if not m:
        raise ValueError('size参数格式应为 WxH(如 1024x1024)')
    w, h = int(m.group(1)), int(m.group(2))
    if w <= 0 or h <= 0 or w > MAX_DIMENSION or h > MAX_DIMENSION:
        raise ValueError(f'尺寸超出限制,最大{MAX_DIMENSION}x{MAX_DIMENSION}')
    return w, h
@require_POST
def upload_image(request):
    """Validate, resize, and store an uploaded image; responds with JSON.

    Pipeline: presence check -> size limits -> Pillow verification ->
    RGB convert + resize -> JPEG encode -> storage save -> URL response.
    """
    # 1) Basic validation: the multipart part must be present.
    f = request.FILES.get('file')
    if not f:
        return JsonResponse({'error': '缺少file文件'}, status=400)

    try:
        w, h = parse_size(request.GET.get('size'))
    except ValueError as e:
        return JsonResponse({'error': str(e)}, status=400)

    # 2) Size limits (double-checked: reported attribute and actual read).
    if getattr(f, 'size', None) and f.size > MAX_UPLOAD_BYTES:
        return JsonResponse({'error': '文件过大'}, status=413)
    data = f.read(MAX_UPLOAD_BYTES + 1)
    if len(data) > MAX_UPLOAD_BYTES:
        return JsonResponse({'error': '文件过大'}, status=413)
    bio = BytesIO(data)

    # 3) Safely open and verify the image.
    try:
        img = Image.open(bio)
        img.verify()  # header/structure validation; invalidates the handle
        bio.seek(0)
        img = Image.open(bio)  # reopen for actual processing
        img = ImageOps.exif_transpose(img)  # honor EXIF orientation
    except UnidentifiedImageError:
        return JsonResponse({'error': '无效的图片文件'}, status=400)
    except Exception as e:
        logger.exception('图片验证失败: %s', e)
        return JsonResponse({'error': '图片处理失败'}, status=400)

    # 4) Convert and resize (CPU-bound; consider moving to an async task).
    try:
        # Always emit JPEG; branch on img.format if the original format
        # must be preserved.
        img = img.convert('RGB')
        img = img.resize((w, h), resample=Image.Resampling.LANCZOS)
    except Exception as e:
        logger.exception('图片缩放失败: %s', e)
        return JsonResponse({'error': '缩放失败'}, status=400)

    # 5) Encode and persist via the storage backend with a safe filename.
    buf = BytesIO()
    try:
        img.save(buf, format='JPEG', quality=90, optimize=True)
    except Exception as e:
        logger.exception('图片编码失败: %s', e)
        return JsonResponse({'error': '图片编码失败'}, status=500)
    finally:
        try:
            img.close()
        except Exception:
            pass
    buf.seek(0)

    filename = f'{uuid.uuid4().hex}_{w}x{h}.jpg'
    rel_path = os.path.join(UPLOAD_SUBDIR, filename)
    try:
        saved_path = default_storage.save(rel_path, ContentFile(buf.read()))
    except Exception as e:
        logger.exception('存储失败: %s', e)
        return JsonResponse({'error': '存储失败'}, status=500)

    # 6) Return a URL rather than exposing a server filesystem path.
    try:
        url = default_storage.url(saved_path)
    except Exception:
        url = f'{getattr(settings, "MEDIA_URL", "/media/")}{saved_path}'
    return JsonResponse({'ok': True, 'url': url, 'width': w, 'height': h})
def clean_tmp(tmp_dir: str | Path = '/tmp/uploads', max_age_seconds: int = 24 * 3600) -> int: logger = logging.getLogger(name) base = Path(tmp_dir) if not base.exists() or not base.is_dir(): logger.warning('临时目录不存在或不可用: %s', tmp_dir) return 0
now = time.time()
removed = 0
for entry in os.scandir(base):
try:
if not entry.is_file(follow_symlinks=False):
continue
if not entry.name.endswith('.tmp'):
continue
age = now - entry.stat().st_mtime
if age > max_age_seconds:
os.unlink(entry.path)
removed += 1
except Exception as e:
logger.warning('删除临时文件失败 %s: %s', entry.path, e)
return removed
设计与实践说明
额外建议(可选)
以上改造能在不引入额外依赖的前提下显著提升安全性、可读性与可靠性;对于更高的吞吐与稳定性,建议结合队列异步处理与云存储进一步演进。
下面是针对你现有代码的系统化优化建议与示例改造,重点覆盖结构优化、性能优化、可读性增强、安全性改进与错误处理优化,并结合 Spring/JdbcTemplate 的最佳实践。建议按模块逐步落地,避免一次性大改造成风险。
一、问题诊断(现状与风险)
二、重构总体思路
三、示例改造代码(精简可直接落地)
package com.example.demo.dto;
import jakarta.validation.constraints.NotBlank;
import jakarta.validation.constraints.NotNull;
import jakarta.validation.constraints.Positive;
import jakarta.validation.constraints.Size;
import java.math.BigDecimal;
import java.time.OffsetDateTime;
import java.util.List;
/** Request payload for order creation; bean-validated at the controller. */
public class CreateOrderRequest {
    @NotBlank
    @Size(max = 100)
    private String customer;

    @NotNull
    @Positive
    private BigDecimal amount;

    // getters/setters
}
/** Flat projection of an order_items row. */
public class OrderItemDto {
    private Long id;
    private Long orderId;
    private String sku;
    private Integer qty;
    private BigDecimal price;
    // getters/setters
}
/** Order row plus its batch-loaded line items. */
public class OrderDto {
    private Long id;
    private String customer;
    private BigDecimal amount;
    private OffsetDateTime createdAt;
    private List<OrderItemDto> items;
    // getters/setters
}
/** Generic page wrapper returned by search endpoints. */
public class PageResponse<T> {
    private List<T> content;
    private long total; // optional: total row count, when a count query is run
    private int page;
    private int size;
    // getters/setters, constructors
}
/**
 * Minimal create-order response: generated id plus a status string.
 * NOTE(review): OrderController calls new OrderCreatedResponse(id, "ok"),
 * so a two-argument constructor is assumed here — confirm it exists.
 */
public class OrderCreatedResponse {
    private Long id;
    private String status;
    // getters/setters, constructors
}
/** Status/result holder for an asynchronous CSV export job. */
public class ExportJobResponse {
    private String jobId;
    private String status; // QUEUED/RUNNING/DONE/FAILED
    private String filePath; // populated once the job completes
    // getters/setters
}
package com.example.demo;
import com.example.demo.dto.*;
import jakarta.validation.constraints.Max;
import jakarta.validation.constraints.Min;
import jakarta.validation.constraints.Size;
import org.springframework.http.ResponseEntity;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
/**
 * REST endpoints for order search, creation, and asynchronous CSV export.
 * Input constraints are declared via jakarta-validation annotations.
 */
@RestController
@RequestMapping("/orders")
@Validated
public class OrderController {

    private final OrderService service;

    public OrderController(OrderService service) {
        this.service = service;
    }

    /** Paginated search; page/size are bounded to protect the database. */
    @GetMapping("/search")
    public ResponseEntity<PageResponse<OrderDto>> search(
            @RequestParam(required = false) @Size(max = 100) String keyword,
            @RequestParam(defaultValue = "0") @Min(0) int page,
            @RequestParam(defaultValue = "20") @Min(1) @Max(200) int size) {
        PageResponse<OrderDto> result = service.search(keyword, page, size);
        return ResponseEntity.ok(result);
    }

    /** Creates an order and returns its generated id. */
    @PostMapping("/create")
    public ResponseEntity<OrderCreatedResponse> create(@RequestBody @Validated CreateOrderRequest body) {
        Long id = service.create(body);
        return ResponseEntity.ok(new OrderCreatedResponse(id, "ok"));
    }

    // Export runs as an independent asynchronous job (202 Accepted).
    @PostMapping("/export")
    public ResponseEntity<ExportJobResponse> export(
            @RequestParam(required = false) @Size(max = 100) String keyword,
            @RequestParam(defaultValue = "0") @Min(0) int page,
            @RequestParam(defaultValue = "100") @Min(1) @Max(1000) int size) {
        ExportJobResponse job = service.exportAsync(keyword, page, size);
        return ResponseEntity.accepted().body(job);
    }

    /** Polls the status of a previously submitted export job. */
    @GetMapping("/export/{jobId}")
    public ResponseEntity<ExportJobResponse> exportStatus(@PathVariable String jobId) {
        return ResponseEntity.ok(service.getExportStatus(jobId));
    }
}
package com.example.demo;
import com.example.demo.dto.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.*;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.io.BufferedWriter;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
/**
 * Order persistence and export service built on NamedParameterJdbcTemplate.
 * Export jobs run on a small in-process thread pool and are tracked in an
 * in-memory map keyed by job id.
 */
@Service
public class OrderService {

    private static final Logger log = LoggerFactory.getLogger(OrderService.class);

    private final NamedParameterJdbcTemplate jdbc;
    private final ThreadPoolTaskExecutor executor;

    // Simple in-memory job registry for demonstration; replace with a
    // persistent store in production.
    private final Map<String, ExportJobResponse> jobs = new ConcurrentHashMap<>();

    public OrderService(NamedParameterJdbcTemplate jdbc) {
        this.jdbc = jdbc;
        // A more complete thread pool would live in a configuration class.
        // NOTE(review): the executor is never shut down here — confirm
        // lifecycle handling (e.g. @PreDestroy) before production use.
        this.executor = new ThreadPoolTaskExecutor();
        this.executor.setCorePoolSize(2);
        this.executor.setMaxPoolSize(4);
        this.executor.setQueueCapacity(100);
        this.executor.setThreadNamePrefix("export-");
        this.executor.initialize();
    }

    /** Inserts a new order and returns its generated primary key. */
    @Transactional
    public Long create(CreateOrderRequest body) {
        String sql = "INSERT INTO orders(customer, amount, created_at) VALUES(:customer, :amount, :createdAt)";
        MapSqlParameterSource params = new MapSqlParameterSource()
                .addValue("customer", body.getCustomer().trim())
                .addValue("amount", body.getAmount())
                .addValue("createdAt", OffsetDateTime.now(ZoneOffset.UTC)); // let the driver map the temporal type
        KeyHolder kh = new GeneratedKeyHolder();
        int r = jdbc.update(sql, params, kh);
        if (r <= 0) {
            throw new RuntimeException("Insert failed");
        }
        Number key = kh.getKey();
        return key == null ? null : key.longValue();
    }

    /**
     * Paginated keyword search over orders; items are batch-loaded with a
     * single IN query to avoid the N+1 pattern.
     */
    @Transactional(readOnly = true)
    public PageResponse<OrderDto> search(String keyword, int page, int size) {
        String pattern = likePattern(keyword);
        MapSqlParameterSource params = new MapSqlParameterSource()
                .addValue("pattern", pattern)
                .addValue("limit", size)
                .addValue("offset", page * size);
        // NOTE(review): "ESCAPE '\\\\'" renders as the two-character SQL
        // literal '\\'; many dialects require a single-character escape —
        // verify against the target database.
        String baseWhere = pattern == null ? "" : " WHERE customer LIKE :pattern ESCAPE '\\\\' ";
        String sql = "SELECT id, customer, amount, created_at FROM orders" + baseWhere +
                " ORDER BY created_at DESC LIMIT :limit OFFSET :offset";
        List<OrderDto> orders = jdbc.query(sql, params, orderMapper());

        // N+1 fix: batch-load items for the whole page at once.
        List<Long> ids = orders.stream().map(OrderDto::getId).collect(Collectors.toList());
        Map<Long, List<OrderItemDto>> itemsMap = Collections.emptyMap();
        if (!ids.isEmpty()) {
            MapSqlParameterSource p2 = new MapSqlParameterSource().addValue("ids", ids);
            String sqlItems = "SELECT id, order_id, sku, qty, price FROM order_items WHERE order_id IN (:ids)";
            List<OrderItemDto> items = jdbc.query(sqlItems, p2, orderItemMapper());
            itemsMap = items.stream().collect(Collectors.groupingBy(OrderItemDto::getOrderId));
        }
        orders.forEach(o -> o.setItems(itemsMap.getOrDefault(o.getId(), Collections.emptyList())));

        // Optional total count (when the page UI needs it).
        long total = countOrders(pattern);
        PageResponse<OrderDto> resp = new PageResponse<>();
        resp.setContent(orders);
        resp.setTotal(total);
        resp.setPage(page);
        resp.setSize(size);
        return resp;
    }

    /** Queues an export job and returns immediately with its id/status. */
    public ExportJobResponse exportAsync(String keyword, int page, int size) {
        String jobId = UUID.randomUUID().toString();
        ExportJobResponse job = new ExportJobResponse();
        job.setJobId(jobId);
        job.setStatus("QUEUED");
        jobs.put(jobId, job);
        executor.execute(() -> {
            ExportJobResponse j = jobs.get(jobId);
            j.setStatus("RUNNING");
            try {
                PageResponse<OrderDto> data = search(keyword, page, size);
                Path file = Files.createTempFile("orders-", ".csv");
                try (BufferedWriter bw = Files.newBufferedWriter(file)) {
                    // Minimal CSV writer; production code should use
                    // Commons CSV with proper escaping.
                    bw.write("id,customer,amount,created_at\n");
                    for (OrderDto o : data.getContent()) {
                        bw.write(o.getId() + "," + safeCsv(o.getCustomer()) + "," + o.getAmount() + "," + o.getCreatedAt() + "\n");
                    }
                }
                j.setFilePath(file.toAbsolutePath().toString());
                j.setStatus("DONE");
                log.info("Export done: {}", j.getFilePath());
            } catch (Exception e) {
                log.error("Export job failed", e);
                j.setStatus("FAILED");
            }
        });
        return job;
    }

    /** Returns the tracked job, or a NOT_FOUND placeholder when unknown. */
    public ExportJobResponse getExportStatus(String jobId) {
        ExportJobResponse job = jobs.get(jobId);
        if (job == null) {
            ExportJobResponse missing = new ExportJobResponse();
            missing.setJobId(jobId);
            missing.setStatus("NOT_FOUND");
            return missing;
        }
        return job;
    }

    /** Maps an orders row to OrderDto (items filled in separately). */
    private RowMapper<OrderDto> orderMapper() {
        return (rs, rowNum) -> {
            OrderDto o = new OrderDto();
            o.setId(rs.getLong("id"));
            o.setCustomer(rs.getString("customer"));
            o.setAmount(rs.getBigDecimal("amount"));
            // Recommended: store created_at as timestamp with time zone so
            // the driver can map it straight to OffsetDateTime.
            OffsetDateTime odt = rs.getObject("created_at", OffsetDateTime.class);
            o.setCreatedAt(odt);
            return o;
        };
    }

    /** Maps an order_items row to OrderItemDto. */
    private RowMapper<OrderItemDto> orderItemMapper() {
        return (rs, rowNum) -> {
            OrderItemDto i = new OrderItemDto();
            i.setId(rs.getLong("id"));
            i.setOrderId(rs.getLong("order_id"));
            i.setSku(rs.getString("sku"));
            i.setQty(rs.getInt("qty"));
            i.setPrice(rs.getBigDecimal("price"));
            return i;
        };
    }

    /**
     * Counts orders matching the (already escaped) LIKE pattern, if any.
     * DataAccessException is unchecked; the throws clause is documentation.
     */
    private long countOrders(String pattern) throws DataAccessException {
        String where = pattern == null ? "" : " WHERE customer LIKE :pattern ESCAPE '\\\\' ";
        String sql = "SELECT COUNT(*) FROM orders" + where;
        MapSqlParameterSource p = new MapSqlParameterSource().addValue("pattern", pattern);
        Long total = jdbc.queryForObject(sql, p, Long.class);
        return total == null ? 0L : total;
    }

    // Escapes LIKE special characters and rejects blank keywords (returns
    // null so callers can skip the WHERE clause entirely).
    private String likePattern(String keyword) {
        if (keyword == null || keyword.isBlank()) return null;
        String k = keyword.trim();
        // Escape % and _ using backslash as the escape character.
        k = k.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_");
        return "%" + k + "%";
    }

    /** Quotes a CSV field, doubling embedded double quotes. */
    private String safeCsv(String s) {
        if (s == null) return "";
        String v = s.replace("\"", "\"\"");
        return "\"" + v + "\"";
    }
}
package com.example.demo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.dao.DataAccessException;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.validation.BindException;
import org.springframework.web.bind.MethodArgumentNotValidException;
import org.springframework.web.bind.annotation.*;
import java.util.HashMap;
import java.util.Map;
/** Maps common exception families to consistent JSON error responses. */
@ControllerAdvice
public class GlobalExceptionHandler {

    private static final Logger log = LoggerFactory.getLogger(GlobalExceptionHandler.class);

    /** Builds the standard { error, message } response body. */
    private static Map<String, Object> errorBody(String code, String message) {
        Map<String, Object> body = new HashMap<>();
        body.put("error", code);
        body.put("message", message);
        return body;
    }

    /** Validation/binding/argument failures -> 400 with the exception message. */
    @ExceptionHandler({MethodArgumentNotValidException.class, BindException.class, IllegalArgumentException.class})
    public ResponseEntity<Object> handleBadRequest(Exception e) {
        log.warn("Bad request", e);
        return ResponseEntity.status(HttpStatus.BAD_REQUEST)
                .body(errorBody("BAD_REQUEST", e.getMessage()));
    }

    /** Data-access failures -> 500 with a generic message (no SQL leakage). */
    @ExceptionHandler(DataAccessException.class)
    public ResponseEntity<Object> handleDb(Exception e) {
        log.error("DB error", e);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
                .body(errorBody("DB_ERROR", "Database operation failed"));
    }

    /** Catch-all -> 500 with a generic message. */
    @ExceptionHandler(Exception.class)
    public ResponseEntity<Object> handleGeneric(Exception e) {
        log.error("Server error", e);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
                .body(errorBody("INTERNAL_ERROR", "Unexpected error"));
    }
}
四、具体优化点清单(便于检查落地)
五、可选进一步改进
以上改造可以逐步分阶段实施:先分页与参数化 SQL,再修复 N+1,随后统一错误处理与日志,最后迁移导出到异步。这样能快速消除高风险问题,同时稳步提升可维护性与性能。
针对「对现有代码进行全面审查,提供结构、性能、可读性和安全性改进建议」的日常工作场景,该工具旨在解决以下问题:
工具名称: 代码优化专家
功能简介: 该提示词帮助开发者对现有代码进行全面审查,提供结构优化、性能提升、可读性增强和安全性改进建议。结合重构技巧、最佳实践和主流编码规范,确保代码高效、整洁且易维护。
从代码审查到优化实施的全流程协作,确保代码质量持续提升。
借助提示词轻松优化复杂后端逻辑,让API层次结构清晰、性能更高,开发效率显著提升。
通过系统化的改进建议,学习代码优化的最佳实践,快速掌握标准化编程技巧。
帮助审查团队代码质量,制定更高效的开发规范与优化策略,推动项目高效交付。
将模板生成的提示词复制粘贴到您常用的 Chat 应用(如 ChatGPT、Claude 等),即可直接对话使用,无需额外开发。适合个人快速体验和轻量使用场景。
把提示词模板转化为 API,您的程序可任意修改模板参数,通过接口直接调用,轻松实现自动化与批量处理。适合开发者集成与业务系统嵌入。
在 MCP client 中配置对应的 server 地址,让您的 AI 应用自动调用提示词模板。适合高级用户和团队协作,让提示词在不同 AI 工具间无缝衔接。
免费获取高级提示词-优惠即将到期