Read all of the frontend's JS files to collect every backend endpoint the frontend calls, then send a request to each one.

import json
import os
import re

import pymysql
import requests


class UrlFetcher:
    def __init__(self, db_config, host, headers):
        self.db_config = db_config  # database configuration
        self.host = host  # request host
        self.headers = headers  # request headers

    def fetch_urls(self, url_sources):
        """
        从URL源获取URL列表
        """
        urls = []
        for source in url_sources:
            # Database source: run the configured query and collect the menu_url values
            if source['type'] == 'db':
                db = pymysql.connect(**source['config'])
                cur = db.cursor(pymysql.cursors.DictCursor)
                cur.execute(source['query'])
                urls.extend(row['menu_url'] for row in cur.fetchall())
                cur.close()
                db.close()

            # Folder source: extract URLs from the JS files in the directory
            elif source['type'] == 'folder':
                dir_path = source['path']
                url_pattern = source['pattern']
                urls.extend(self.extract_urls_from_js_files(dir_path, url_pattern))

            # Single-URL source: append it directly
            elif source['type'] == 'url':
                urls.append(source['url'])

        return urls

    def send_request(self, url):
        """
        发送请求获取响应
        """
        session = requests.session()  # 创建session对象
        response = session.post(self.host + url, data={}, headers=self.headers)  # 发送post请求
        session.close()  # 关闭session
        return response

    def write_response_to_file(self, data):
        """
        将响应写入文件追加换行写入
        """
        try:
            with open('files/output.txt', 'a', encoding='utf-8', buffering=1) as file:
                file.write("\n")
                file.writelines(
                    str(data['id']) + data['menu_url'] + '   ' + json.dumps(data['response'], ensure_ascii=False))
        except Exception as e:
            print(e)

    def process_urls(self, url_sources):
        """
        处理URL列表
        """
        urls = self.fetch_urls(url_sources)  # 获取URL列表
        with open('files/output.txt', 'w') as f:
            f.truncate(0)  # 清空文件内容
        for menu_url in urls:
            response = self.send_request(menu_url)  # 发送请求
            try:
                response_json = response.json()  # 将响应转为json
                id = response_json['id']
                print(f'结果值id{id}为', response_json)
                self.write_response_to_file({'id': id, 'menu_url': menu_url, 'response': response_json})  # 将结果写入文件
            except Exception as e:
                print(f'URL {menu_url} 的结果无法解析为json', response.text)

    def extract_urls_from_js_files(self, dir_path, url_pattern):
        """
        从JS文件中提取URL
        """
        urls = []
        # 遍历目录下的所有文件
        for file_name in os.listdir(dir_path):
            # 如果是js文件,读取文件内容并获取URL
            if os.path.splitext(file_name)[-1] == '.js':
                file_path = os.path.join(dir_path, file_name)
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = f.read()
                urls_in_file = re.findall(url_pattern, data)  # 查找URL
                urls.extend(list(map(lambda url: url.strip(), urls_in_file)))

        return urls

    def run(self, url_sources):
        """
        运行模块
        """
        self.process_urls(url_sources)  # 处理URL列表


if __name__ == '__main__':
    db_config = {
        'host': 'rncs.com',
        'port': 3306,
        'user': 'yai',
        'password': '456',
        'db': 'test',
        'charset': 'utf8'
    }
    headers = {"Content-Type": "application/json;charset=UTF-8", "token": 'D237F54160BF73ED489871C8996BB646'}
    url_sources = [
        {'type': 'url', 'url': 'http://test.com/api/abc'},
        {'type': 'db', 'config': db_config, 'query': 'SELECT menu_url FROM yy_menu_url WHERE menu_pid != 0'},
        {'type': 'folder', 'path': 'files', 'pattern': r"url:\s*'(.+?)'"},
    ]
    url_fetcher = UrlFetcher(db_config, 'http://test.com', headers)
    url_fetcher.run(url_sources)
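
To see what the folder source extracts, the short demo below runs the same pattern configured in url_sources (r"url:\s*'(.+?)'") against a made-up fragment of frontend JS. The file contents are hypothetical; the re.findall call is the same one extract_urls_from_js_files performs per file.

import re

# Hypothetical JS fragment of the kind the frontend bundles might contain
js_source = """
$.ajax({ url: '/api/user/list', type: 'post' });
axios({ url: '/api/menu/tree' });
"""

pattern = r"url:\s*'(.+?)'"
print(re.findall(pattern, js_source))  # ['/api/user/list', '/api/menu/tree']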

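One caveat with send_request as written: requests applies no default timeout, so a single unresponsive endpoint can stall the whole batch. A minimal hardened sketch (the helper name and timeout value are illustrative, not part of the original class):

import requests

def post_with_timeout(host, url, headers, timeout=10):
    # timeout= keeps one dead endpoint from hanging the run;
    # raise_for_status() turns HTTP 4xx/5xx into exceptions the caller can catch
    with requests.Session() as session:
        response = session.post(host + url, data={}, headers=headers, timeout=timeout)
        response.raise_for_status()
        return response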
