العودة إلى القوالب
قالب سير عمل
Schedule Trigger2 | Operasyon için n8n Otomasyon İş Akışı Şablonu (HTTP)
n8n için hazır otomasyon şablonu: Schedule Trigger2. 21 düğüm. Entegrasyonlar: HTTP. JSON'u kopyalayıp n8n'e içe aktarın.
21 العُقد — Schedule_Trigger2-workflow.json
{"meta": {"instanceId": "6ee5d038b3d9fc2c09da6a02264d9559941160ab8b9dd73e6ca57abf9fcd1d8b"},"nodes": [{"id": "a58752a1-e536-4ba4-8cf6-77e5462a6887","name": "Schedule Trigger2","type": "n8n-nodes-base.scheduleTrigger","position": [-3504,448],"parameters": {"rule": {"interval": [{}]}},"typeVersion": 1.2},{"id": "a00fa7b7-bacc-47da-9dd2-798d0d079c4d","name": "Initiate PDF Extraction","type": "n8n-nodes-base.httpRequest","position": [-2096,304],"parameters": {"url": "url-for-google-gemini-pdf-processing","method": "POST","options": {},"sendBody": true,"sendHeaders": true,"bodyParameters": {"parameters": [{"name": "signed_s3_url","value": "={{ $json.url }}"}]},"headerParameters": {"parameters": [{"name": "x-api-key","value": "XXXXXXXXX"}]}},"typeVersion": 4.2},{"id": "16c2a856-ef46-4894-a054-273d743ed989","name": "PDF Status Polling","type": "n8n-nodes-base.httpRequest","position": [-1872,304],"parameters": {"url": "=polling_url_for_pdf_parsing","options": {},"sendHeaders": true,"headerParameters": {"parameters": [{"name": "x-api-key","value": "xxxxxx"}]}},"typeVersion": 4.2},{"id": "9a1810ad-1d8a-4904-ae0f-271bbc78c269","name": "Wait","type": "n8n-nodes-base.wait","position": [-1648,208],"webhookId": "e3c285ba-1289-4c44-a641-453463f20d73","parameters": {},"typeVersion": 1.1},{"id": "043faeff-13d9-4c94-a42d-488a2cce3f63","name": "Extract IdeaBlocks from API Response","type": "n8n-nodes-base.set","position": [656,384],"parameters": {"options": {},"assignments": {"assignments": [{"id": "8133796e-0114-432b-83e6-fb3a2c5ca4be","name": "manual-section","type": "string","value": "={{ $json.choices[0].message.content }}"}]}},"typeVersion": 3.4},{"id": "d077e83b-b65e-4f01-97a8-c8fb8c91150f","name": "Convert to File","type": "n8n-nodes-base.convertToFile","position": [1376,208],"parameters": {"options": {"encoding": "utf8","fileName": "={{\n (() => {\n const raw = $json.output || '';\n // Remove leading \"# \" or \"#\"\n const sanitized = raw.replace(/^#\\s*/, '');\n // Split 
into lines, take the first non-empty one\n const lines = sanitized.split('\\n').map(s => s.trim()).filter(Boolean);\n const headline = lines[0] || '';\n // Remove unwanted punctuation for filename (last char, if any)\n return headline.replace(/[!.?\\\\]$/, '');\n })()\n}}.txt"},"operation": "toText","sourceProperty": "output"},"typeVersion": 1.1},{"id": "eab33a5c-f147-4a6c-95be-b2cb34be3996","name": "Technical Manual Prompt Payload Assembly","type": "n8n-nodes-base.code","position": [-272,384],"parameters": {"jsCode": "/**\n * n8n Code node\n * Input: items where each item.json = { chunk: string, order: string }\n * Output: array of payload objects with primary, proceeding, and following sections\n */\n\nconst inputItems = $input.all();\n\n// If a single item contains the whole array under a property (e.g. { data: [...] }),\n// you can adapt the next line accordingly.\n// Example: const chunks = inputItems[0]?.json?.data ?? inputItems.map(item => item.json);\nconst chunks = inputItems.map(item => item.json);\n\n// Sort by numeric order to ensure correct sequence\nchunks.sort((a, b) => Number(a.order) - Number(b.order));\n\nconst results = chunks.map((primaryChunk, index) => {\n const prevChunk = chunks[index - 1] ?? null;\n const nextChunk = chunks[index + 1] ?? null;\n\n const payload = [\n '### Primary ###',\n '---',\n primaryChunk.chunk ?? '',\n '---',\n '### Proceeding ###',\n '---',\n prevChunk ? prevChunk.chunk ?? '' : '',\n '---',\n '### Following ###',\n '---',\n nextChunk ? nextChunk.chunk ?? '' : '',\n ].join('\\n');\n\n return {\n json: {\n payload,\n 'primary-order': primaryChunk.order ?? '',\n 'proceeding-order': prevChunk?.order ?? '',\n 'following-order': nextChunk?.order ?? 
'',\n 'payload-order': String(index + 1),\n },\n };\n});\n\nreturn results;"},"typeVersion": 2},{"id": "1a07e7f5-8ea1-4cdc-bb90-301eb2f0a037","name": "Technical Manual Split Chunks","type": "n8n-nodes-base.code","position": [-720,304],"parameters": {"jsCode": "// n8n Function node code\nconst H1_LIMIT = 4000;\nconst H2_LIMIT = 5000;\n\nfunction splitByHeading(text, level) {\n const src = String(text ?? '');\n if (!src.trim()) return [];\n\n const lines = src.split(/\\r?\\n/);\n const headingIdxs = [];\n let inFence = null;\n\n const fenceRe = /^\\s*(`{3,}|~{3,})/;\n const headingRe = /^[ \\t]{0,3}(#{1,6})[ \\t]+(.+?)\\s*#*\\s*$/;\n\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i];\n\n const fm = line.match(fenceRe);\n if (fm) {\n const fenceChar = fm[1][0];\n if (!inFence) inFence = fenceChar;\n else if (inFence === fenceChar) inFence = null;\n continue;\n }\n\n if (!inFence) {\n const hm = line.match(headingRe);\n if (hm && hm[1].length === level) {\n headingIdxs.push(i);\n }\n }\n }\n\n if (headingIdxs.length === 0) {\n const only = lines.join('\\n').trim();\n return only ? [only] : [];\n }\n\n const sections = [];\n for (let k = 0; k < headingIdxs.length; k++) {\n const start = headingIdxs[k];\n const end = (k + 1 < headingIdxs.length) ? headingIdxs[k + 1] : lines.length;\n\n let sectionLines = lines.slice(start, end);\n\n if (k === 0 && start > 0) {\n sectionLines = lines.slice(0, start).concat(sectionLines);\n }\n\n const section = sectionLines.join('\\n').trim();\n if (section) sections.push(section);\n }\n\n return sections;\n}\n\nconst rawInput = $json.output ?? '';\nconst text = typeof rawInput === 'string' ? rawInput : String(rawInput);\n\nconst finalChunks = [];\nconst h1Sections = splitByHeading(text, 1);\nconst topLevelSections = h1Sections.length ? 
h1Sections : [text.trim()].filter(Boolean);\n\nfor (const h1 of topLevelSections) {\n if (h1.length <= H1_LIMIT) {\n finalChunks.push(h1);\n continue;\n }\n\n const h2Sections = splitByHeading(h1, 2);\n const secondLevelSections = h2Sections.length ? h2Sections : [h1];\n\n for (const h2 of secondLevelSections) {\n if (h2.length <= H2_LIMIT) {\n finalChunks.push(h2);\n continue;\n }\n\n const h3Sections = splitByHeading(h2, 3);\n if (h3Sections.length) {\n finalChunks.push(...h3Sections);\n } else {\n finalChunks.push(h2);\n }\n }\n}\n\nreturn finalChunks.map((chunk, idx) => ({\n json: {\n chunk,\n order: String(idx + 1),\n },\n}));"},"typeVersion": 2},{"id": "4bc5a0b3-c468-4428-ac01-7479f974f5a8","name": "Strip and Clean to Aggregate XML","type": "n8n-nodes-base.set","position": [1184,208],"parameters": {"options": {},"assignments": {"assignments": [{"id": "10b6a1df-b189-4ebb-9fa8-2fcedb14509f","name": "output","type": "string","value": "={{ String($items()\n .flatMap(i => i.json['manual-sections'] || [])\n .flatMap(sec => [].concat(sec?.['manual-section'] || []))\n .map(c => c?.choices?.[0]?.message?.content\n ?? c?.choices?.[0]?.delta?.content\n ?? (typeof c === 'string' ? c : c?.content ?? 
''))\n .join(''))\n .replace(/```[\\w#+.-]*\\s*\\r?\\n?|```/g, '') }}"}]}},"typeVersion": 3.4},{"id": "71e3e38e-b653-47d5-9924-3dd9d28cb401","name": "Upload Blockified Manual","type": "n8n-nodes-base.googleDrive","position": [1664,416],"parameters": {"name": "=blockified-manual.txt","driveId": {"__rl": true,"mode": "list","value": "My Drive"},"options": {},"folderId": {"__rl": true,"mode": "list","value": "1yVqBHUS7RZqNY1cj6IYUOQ_eLVDjA7y5","cachedResultUrl": "https://drive.google.com/drive/folders/1yVqBHUS7RZqNY1cj6IYUOQ_eLVDjA7y5","cachedResultName": "n8n-blockify-manual-extraction"}},"credentials": {"googleDriveOAuth2Api": {"id": "rvBAumtq4Uwnq9sJ","name": "Google Drive account"}},"retryOnFail": true,"typeVersion": 3,"waitBetweenTries": 5000},{"id": "34ec291b-5bb4-42c8-9faf-73a3a6d9c16f","name": "Blockify Technical Ingest API","type": "n8n-nodes-base.httpRequest","position": [432,384],"parameters": {"url": "https://api.blockify.ai/v1/chat/completions","method": "POST","options": {},"jsonBody": "={\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": {{ JSON.stringify($json.payload) }}\n }\n ],\n \"max_tokens\": 8000,\n \"temperature\": 0.5,\n \"model\": \"technical-ingest\"\n}","sendBody": true,"specifyBody": "json","authentication": "genericCredentialType","genericAuthType": "httpBearerAuth"},"credentials": {"httpBearerAuth": {"id": "tipDu5kIivBygKXB","name": "Blockify API Production"}},"typeVersion": 4.2},{"id": "3315311a-dcd8-4be6-8124-c6832c493cf2","name": "If Completed Poller","type": "n8n-nodes-base.if","position": [-1424,304],"parameters": {"options": {},"conditions": {"options": {"version": 2,"leftValue": "","caseSensitive": true,"typeValidation": "strict"},"combinator": "and","conditions": [{"id": "325e71a3-a124-48ff-865e-ea6cafd74f28","operator": {"name": "filter.operator.equals","type": "string","operation": "equals"},"leftValue": "={{ $json.status }}","rightValue": "COMPLETED"}]}},"typeVersion": 2.2},{"id": 
"37674ca5-10fd-4176-a863-479588fb389e","name": "Sticky Note","type": "n8n-nodes-base.stickyNote","position": [-2160,-32],"parameters": {"color": 4,"width": 1120,"height": 672,"content": "## Extract Data from PDF Into Markdown\n\nThrough a user selected method you can extract data from the file and convert that text into a markdown output which uses ```#``` and ```##``` and ```###``` etc for headers.\n\nWe have a proprietary PDF extraction process and workflow that we've developed, however you can use whatever you like, as long as the text is output as proper markdown with headers."},"typeVersion": 1},{"id": "8069420b-ec08-4703-8908-835855fd24ae","name": "Sticky Note2","type": "n8n-nodes-base.stickyNote","position": [-3280,-32],"parameters": {"color": 4,"width": 1088,"height": 672,"content": "## Collect Source Documents and Upload to S3 and Get Signed URL for Processing\n\nExtract data and prepare to convert into markdown through any traditional methods so the information can be passed into the Blockify text based API.\n\nThis sample uses a simple .PDF file uploaded to Google Drive, but the content could be anything."},"typeVersion": 1},{"id": "f1b3ca3d-2a74-46de-a376-bf4643d0cd0d","name": "Sticky Note3","type": "n8n-nodes-base.stickyNote","position": [-1008,-32],"parameters": {"color": 4,"width": 448,"height": 672,"content": "## Parse Markdown and Split into Specific Chunks at Header Breaks\n\nChunk using a special chunking method. 
See Documentation provided as a note within this workflow for details in: \"Markdown Document Chunking Guidelines\" in the documentation: https://docs.google.com/document/d/14goJMiMm1gU4XWe4mWWQN5ao8dKi5bwg5zrNEZR18oc/edit?tab=t.0#heading=h.82v51btyigaq"},"typeVersion": 1},{"id": "a18b87e5-821d-47b4-85d7-01b39e54a32e","name": "Sticky Note4","type": "n8n-nodes-base.stickyNote","position": [-512,-32],"parameters": {"color": 4,"width": 560,"height": 672,"content": "## Assemble Payload for Blockify API following Specific Documentation Guidelines\n\nAssemble the chunks into a very specific payload structure which the LLM has been tuned on.\n\n```### Primary ### --- [The main content that you want to Blockify from your technical manual goes here.] --- ### Proceeding ### --- [The section that comes before the Primary Section goes here.] --- ### Following ### --- [The section that comes after the Primary Section goes here.]```"},"typeVersion": 1},{"id": "69b9a108-12e1-4859-a976-95bc26ef8bd3","name": "Sticky Note5","type": "n8n-nodes-base.stickyNote","position": [112,-32],"parameters": {"color": 4,"width": 752,"height": 672,"content": "## Run API for All Chunks and Extract the XML Blockified Result\n\n### Get your free trial API key here: https://console.blockify.ai/signup\n\nBlockify Ingest is designed to receive input raw source (parsed / chunked) text via LLM API request and output structured optimized XML “IdeaBlocks,” repackaging the source data into a cleaned format. \n\nThe process is ≈99% lossless for numerical data, facts, and key information. 
We always encourage a human to be in the loop to review the outputs of IdeaBlock content.\n\nFor technical manuals use the model ```technical-ingest```"},"typeVersion": 1},{"id": "b6570d29-5609-4aff-abe1-7cba1353df33","name": "Sticky Note6","type": "n8n-nodes-base.stickyNote","position": [896,-32],"parameters": {"color": 4,"width": 944,"height": 672,"content": "## Sample of Output Collection Method.\n\nFormal approach would likely leverage a database of some kind where the user parses the XML into a database row with different columns to filter the relevant information for effective RAG and Agentic processing.\n\n### NOTE YOU MAY CHOOSE A DIFFERENT OUTPUT METHOD - I.E. A DATABASE"},"typeVersion": 1},{"id": "47219d7f-6675-458f-adad-29f5fae73998","name": "Sticky Note7","type": "n8n-nodes-base.stickyNote","position": [-3616,-304],"parameters": {"color": 5,"width": 5632,"height": 1200,"content": "# Step 1: Blockify Technical Manual Ingest\n## Workflow for the Blockify Ingest Component. Does not include the Blockify Distill Component which is independent of the Ingest stage."},"typeVersion": 1},{"id": "088cc03a-29fd-4c72-94c7-9418d527b27c","name": "Sticky Note14","type": "n8n-nodes-base.stickyNote","position": [-4240,-304],"parameters": {"color": 6,"width": 592,"height": 1200,"content": "# Blockify® Data Optimization Workflow\n\n## Blockify Optimizes Data for RAG - Giving Structure to Unstructured Data for ~78X Accuracy, when pairing Blockify Ingest and Blockify Distill\n\n## Learn more at [https://iternal.ai/blockify](https://iternal.ai/blockify)\n\n### Get Free Demo API Access here: [https://console.blockify.ai/signup](https://console.blockify.ai/signup)\n\n### Read the Technical Whitepaper here: [https://iternal.ai/blockify-results](https://iternal.ai/blockify-results)\n\n### See example Accuracy Comparison here: [https://iternal.ai/case-studies/medical-accuracy/](https://iternal.ai/case-studies/medical-accuracy/)\n\nBlockify is a data optimization tool that takes 
messy, unstructured text, like hundreds of sales‑meeting transcripts or long proposals, and intelligently optimizes the data into small, easy‑to‑understand \"IdeaBlocks.\" Each IdeaBlock is just a couple of sentences in length that capture one clear idea, plus a built‑in contextualized question and answer.\n\nWith this approach, Blockify improves accuracy of LLMs (Large Language Models) by an average aggregate 78X, while shrinking the original mountain of text to about 2.5% of its size while keeping (and even improving) the important information.\n\nThis version of Blockify is designed specifically for Technical Manuals and data where order and sequence matters. This process of ingestion is different from the unordered ingest process also available as part of Blockify.\n\nBlockify works by processing chunks of text to create structured data from an unstructured data source.\n\nBlockify® replaces the traditional \"dump‑and‑chunk\" approach with an end‑to‑end pipeline that cleans and organizes content before it ever hits a vector store.\n\nAdmins first define who should see what, then the system ingests any file type—Word, PDF, slides, images—inside public cloud, private cloud, or on‑prem. 
A context‑aware splitter finds natural breaks, and a series of specially developed Blockify LLM model turns each segment into a draft IdeaBlock.\n\nGenAI systems fed with this curated data return sharper answers, hallucinate far less, and comply with security policies out of the box.\n\nThe result: higher trust, lower operating cost, and a clear path to enterprise‑scale RAG without the cleanup headaches that stall most AI rollouts."},"typeVersion": 1},{"id": "88d04eda-7ba3-4f32-a943-019622e513f2","name": "Search files and folders","type": "n8n-nodes-base.googleDrive","position": [-3216,448],"parameters": {"filter": {"folderId": {"__rl": true,"mode": "list","value": "root","cachedResultUrl": "https://drive.google.com/drive","cachedResultName": "/ (Root folder)"}},"options": {},"resource": "fileFolder","returnAll": true,"searchMethod": "query"},"credentials": {"googleDriveOAuth2Api": {"id": "rvBAumtq4Uwnq9sJ","name": "Google Drive account"}},"typeVersion": 3},{"id": "dbd37184-e2f1-4204-9262-cc1a582c5f71","name": "Loop over Documents to Blockify","type": "n8n-nodes-base.splitInBatches","position": [-2992,448],"parameters": {"options": {}},"typeVersion": 3},{"id": "5591235c-65fe-4b78-9f60-df81e4c8ddc5","name": "Download Document to Blockify","type": "n8n-nodes-base.googleDrive","position": [-2768,304],"parameters": {"fileId": {"__rl": true,"mode": "id","value": "={{ $json.id }}"},"options": {},"operation": "download"},"credentials": {"googleDriveOAuth2Api": {"id": "rvBAumtq4Uwnq9sJ","name": "Google Drive account"}},"typeVersion": 3},{"id": "dc0bc4db-3b43-4f2f-8ac5-5ad400fc23dc","name": "Upload a Document to S3","type": "n8n-nodes-base.awsS3","position": [-2560,304],"parameters": {"fileName": "={{ $json.name }}","operation": "upload","bucketName": "n8n-AWS-Bucket-Name-Here","additionalFields": {"parentFolderKey": "folder/path/here"}},"credentials": {"aws": {"id": "xk6TbbAlmWZJN2Dg","name": "AWS account"}},"typeVersion": 2},{"id": 
"46e95293-eb82-4c12-a1a5-109c91834453","name": "Get AWS Signed URL","type": "n8n-nodes-base.code","position": [-2352,304],"parameters": {"jsCode": "const crypto = require('crypto');\n\nfunction encodeRfc3986(str) {\n return encodeURIComponent(str).replace(/[!'()*]/g, c =>\n '%' + c.charCodeAt(0).toString(16).toUpperCase()\n );\n}\n\nfunction encodePath(key) {\n return key.split('/').map(encodeRfc3986).join('/');\n}\n\nfunction awsS3PresignDownload({\n accessKeyId,\n secretAccessKey,\n bucket,\n region,\n key,\n expires = 900,\n endpointHost,\n}) {\n if (!key || /^https?:\\/\\//i.test(key)) {\n throw new Error('key must be the S3 object key (e.g., \"folder/file.txt\"), not a full URL.');\n }\n\n const host = endpointHost\n ? endpointHost\n : (region === 'us-east-1'\n ? `${bucket}.s3.amazonaws.com`\n : `${bucket}.s3.${region}.amazonaws.com`);\n\n const now = new Date();\n const pad = n => n.toString().padStart(2, '0');\n const date = `${now.getUTCFullYear()}${pad(now.getUTCMonth() + 1)}${pad(now.getUTCDate())}`;\n const amzDate = `${date}T${pad(now.getUTCHours())}${pad(now.getUTCMinutes())}${pad(now.getUTCSeconds())}Z`;\n\n const algorithm = 'AWS4-HMAC-SHA256';\n const scope = `${date}/${region}/s3/aws4_request`;\n const signedHeaders = 'host';\n\n const canonicalUri = `/${encodePath(key)}`;\n const xAmzParams = {\n 'X-Amz-Algorithm': algorithm,\n 'X-Amz-Credential': `${accessKeyId}/${scope}`,\n 'X-Amz-Date': amzDate,\n 'X-Amz-Expires': String(expires),\n 'X-Amz-SignedHeaders': signedHeaders,\n };\n\n const canonicalQuery = Object.keys(xAmzParams)\n .sort()\n .map(k => `${encodeRfc3986(k)}=${encodeRfc3986(xAmzParams[k])}`)\n .join('&');\n\n const canonicalHeaders = `host:${host}\\n`;\n const canonicalRequest = [\n 'GET',\n canonicalUri,\n canonicalQuery,\n canonicalHeaders,\n signedHeaders,\n 'UNSIGNED-PAYLOAD',\n ].join('\\n');\n\n const hashedRequest = crypto.createHash('sha256').update(canonicalRequest, 'utf8').digest('hex');\n const stringToSign = 
`${algorithm}\\n${amzDate}\\n${scope}\\n${hashedRequest}`;\n\n const hmac = (key, data) => crypto.createHmac('sha256', key).update(data, 'utf8').digest();\n const kDate = hmac(`AWS4${secretAccessKey}`, date);\n const kRegion = hmac(kDate, region);\n const kService = hmac(kRegion, 's3');\n const kSigning = hmac(kService, 'aws4_request');\n\n const signature = crypto.createHmac('sha256', kSigning).update(stringToSign, 'utf8').digest('hex');\n\n return `https://${host}${canonicalUri}?${canonicalQuery}&X-Amz-Signature=${signature}`;\n}\n\n// Configuration\nconst api_key = 'XXXXXX'; \nconst secret_key = 'YYYYYYYYYYYYYYYYY'; \nconst bucket_name = 'n8n-AWS-Bucket-Name-Here';\nconst region = 'us-west-2';\n\n// Get data from the Download file1 node (which has the binary data with fileName)\nconst downloadedFiles = $('Download Document to Blockify').all();\n\n// Process each item\nconst results = [];\n\nfor (const item of downloadedFiles) {\n let fileName = null;\n \n // Get filename from binary data\n if (item.binary && item.binary.data && item.binary.data.fileName) {\n fileName = item.binary.data.fileName;\n } else if (item.binary) {\n // Try to find it in any binary property\n const binaryKeys = Object.keys(item.binary);\n for (const key of binaryKeys) {\n if (item.binary[key] && item.binary[key].fileName) {\n fileName = item.binary[key].fileName;\n break;\n }\n }\n }\n \n // Fallback to json.name if binary fileName not found\n if (!fileName && item.json && item.json.name) {\n fileName = item.json.name;\n }\n \n if (!fileName) {\n throw new Error(`Cannot find fileName. Binary keys: ${item.binary ? 
JSON.stringify(Object.keys(item.binary)) : 'no binary'}, JSON keys: ${JSON.stringify(Object.keys(item.json))}`);\n }\n \n // Clean the filename (remove leading slashes if any)\n const cleanFileName = fileName.replace(/^\\/+/, '');\n \n // Construct the S3 key (matches the upload configuration with inputs folder)\n const s3Key = `inputs/${cleanFileName}`;\n \n const url = awsS3PresignDownload({\n accessKeyId: api_key,\n secretAccessKey: secret_key,\n bucket: bucket_name,\n region: region,\n key: s3Key,\n expires: 8400,\n });\n \n results.push({\n json: {\n url: url,\n fileName: cleanFileName,\n s3Key: s3Key\n }\n });\n}\n\nreturn results;"},"typeVersion": 2},{"id": "f2a19262-32bd-44d1-ae9f-9461def0abc7","name": "Download Final PDF Extracted Text Output","type": "n8n-nodes-base.httpRequest","position": [-1200,304],"parameters": {"url": "=url-for-google-gemini-pdf-processing/{{ $json.job_id }}","options": {"redirect": {}},"sendHeaders": true,"headerParameters": {"parameters": [{"name": "x-api-key","value": "xxxxx"}]}},"typeVersion": 4.2},{"id": "00458b88-9b89-4d01-ae14-b51acfc4963d","name": "Extract Raw Markdown from PDF Text","type": "n8n-nodes-base.set","position": [-944,304],"parameters": {"options": {},"assignments": {"assignments": [{"id": "10b6a1df-b189-4ebb-9fa8-2fcedb14509f","name": "output","type": "string","value": "={{ $json.data }}"}]}},"typeVersion": 3.4},{"id": "eb137817-7a79-4f2a-962c-a661996a5117","name": "Loop Over Chunks for Blockify","type": "n8n-nodes-base.splitInBatches","position": [208,304],"parameters": {"options": {}},"typeVersion": 3},{"id": "03befca6-bbc3-4301-a5c1-7f0fa919f54f","name": "Aggregate all Manual Sections","type": "n8n-nodes-base.aggregate","position": [976,208],"parameters": {"options": {},"aggregate": "aggregateAllItemData","destinationFieldName": "manual-sections"},"typeVersion": 1}],"pinData": {},"connections": {"Wait": {"main": [[{"node": "If Completed Poller","type": "main","index": 0}]]},"Convert to File": {"main": 
[[{"node": "Upload Blockified Manual","type": "main","index": 0}]]},"Schedule Trigger2": {"main": [[{"node": "Search files and folders","type": "main","index": 0}]]},"Get AWS Signed URL": {"main": [[{"node": "Initiate PDF Extraction","type": "main","index": 0}]]},"PDF Status Polling": {"main": [[{"node": "Wait","type": "main","index": 0}]]},"If Completed Poller": {"main": [[{"node": "Download Final PDF Extracted Text Output","type": "main","index": 0}],[{"node": "PDF Status Polling","type": "main","index": 0}]]},"Initiate PDF Extraction": {"main": [[{"node": "PDF Status Polling","type": "main","index": 0}]]},"Upload a Document to S3": {"main": [[{"node": "Get AWS Signed URL","type": "main","index": 0}]]},"Search files and folders": {"main": [[{"node": "Loop over Documents to Blockify","type": "main","index": 0}]]},"Upload Blockified Manual": {"main": [[{"node": "Loop over Documents to Blockify","type": "main","index": 0}]]},"Aggregate all Manual Sections": {"main": [[{"node": "Strip and Clean to Aggregate XML","type": "main","index": 0}]]},"Blockify Technical Ingest API": {"main": [[{"node": "Extract IdeaBlocks from API Response","type": "main","index": 0}]]},"Download Document to Blockify": {"main": [[{"node": "Upload a Document to S3","type": "main","index": 0}]]},"Loop Over Chunks for Blockify": {"main": [[{"node": "Aggregate all Manual Sections","type": "main","index": 0}],[{"node": "Blockify Technical Ingest API","type": "main","index": 0}]]},"Technical Manual Split Chunks": {"main": [[{"node": "Technical Manual Prompt Payload Assembly","type": "main","index": 0}]]},"Loop over Documents to Blockify": {"main": [[],[{"node": "Download Document to Blockify","type": "main","index": 0}]]},"Strip and Clean to Aggregate XML": {"main": [[{"node": "Convert to File","type": "main","index": 0}]]},"Extract Raw Markdown from PDF Text": {"main": [[{"node": "Technical Manual Split Chunks","type": "main","index": 0}]]},"Extract IdeaBlocks from API Response": {"main": 
[[{"node": "Loop Over Chunks for Blockify","type": "main","index": 0}]]},"Download Final PDF Extracted Text Output": {"main": [[{"node": "Extract Raw Markdown from PDF Text","type": "main","index": 0}]]},"Technical Manual Prompt Payload Assembly": {"main": [[{"node": "Loop Over Chunks for Blockify","type": "main","index": 0}]]}}}
في محرر n8n: الصق باستخدام Ctrl+V → سيتم إنشاء سير العمل